Diffstat (limited to 'hw')
-rw-r--r--  hw/intc/riscv_aclint.c    144
-rw-r--r--  hw/riscv/boot.c            12
-rw-r--r--  hw/riscv/opentitan.c       36
-rw-r--r--  hw/riscv/virt.c            24
-rw-r--r--  hw/ssi/ibex_spi_host.c    612
-rw-r--r--  hw/ssi/meson.build          1
-rw-r--r--  hw/ssi/trace-events         7
7 files changed, 791 insertions, 45 deletions
diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c
index e43b050e92..0412edc982 100644
--- a/hw/intc/riscv_aclint.c
+++ b/hw/intc/riscv_aclint.c
@@ -38,12 +38,18 @@ typedef struct riscv_aclint_mtimer_callback {
     int num;
 } riscv_aclint_mtimer_callback;
 
-static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq)
+static uint64_t cpu_riscv_read_rtc_raw(uint32_t timebase_freq)
 {
     return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
         timebase_freq, NANOSECONDS_PER_SECOND);
 }
 
+static uint64_t cpu_riscv_read_rtc(void *opaque)
+{
+    RISCVAclintMTimerState *mtimer = opaque;
+    return cpu_riscv_read_rtc_raw(mtimer->timebase_freq) + mtimer->time_delta;
+}
+
 /*
  * Called when timecmp is written to update the QEMU timer or immediately
  * trigger timer interrupt if mtimecmp <= current timer value.
@@ -51,13 +57,13 @@ static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq)
 static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer,
                                               RISCVCPU *cpu,
                                               int hartid,
-                                              uint64_t value,
-                                              uint32_t timebase_freq)
+                                              uint64_t value)
 {
+    uint32_t timebase_freq = mtimer->timebase_freq;
     uint64_t next;
     uint64_t diff;
 
-    uint64_t rtc_r = cpu_riscv_read_rtc(timebase_freq);
+    uint64_t rtc_r = cpu_riscv_read_rtc(mtimer);
 
     cpu->env.timecmp = value;
     if (cpu->env.timecmp <= rtc_r) {
@@ -126,9 +132,9 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr,
             qemu_log_mask(LOG_GUEST_ERROR,
                           "aclint-mtimer: invalid hartid: %zu", hartid);
         } else if ((addr & 0x7) == 0) {
-            /* timecmp_lo */
+            /* timecmp_lo for RV32/RV64 or timecmp for RV64 */
             uint64_t timecmp = env->timecmp;
-            return timecmp & 0xFFFFFFFF;
+            return (size == 4) ? (timecmp & 0xFFFFFFFF) : timecmp;
         } else if ((addr & 0x7) == 4) {
             /* timecmp_hi */
             uint64_t timecmp = env->timecmp;
@@ -139,11 +145,12 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr,
             return 0;
         }
     } else if (addr == mtimer->time_base) {
-        /* time_lo */
-        return cpu_riscv_read_rtc(mtimer->timebase_freq) & 0xFFFFFFFF;
+        /* time_lo for RV32/RV64 or time for RV64 */
+        uint64_t rtc = cpu_riscv_read_rtc(mtimer);
+        return (size == 4) ? (rtc & 0xFFFFFFFF) : rtc;
     } else if (addr == mtimer->time_base + 4) {
         /* time_hi */
-        return (cpu_riscv_read_rtc(mtimer->timebase_freq) >> 32) & 0xFFFFFFFF;
+        return (cpu_riscv_read_rtc(mtimer) >> 32) & 0xFFFFFFFF;
     }
 
     qemu_log_mask(LOG_UNIMP,
@@ -156,6 +163,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
     uint64_t value, unsigned size)
 {
     RISCVAclintMTimerState *mtimer = opaque;
+    int i;
 
     if (addr >= mtimer->timecmp_base &&
         addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) {
@@ -167,33 +175,66 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr,
             qemu_log_mask(LOG_GUEST_ERROR,
                           "aclint-mtimer: invalid hartid: %zu", hartid);
         } else if ((addr & 0x7) == 0) {
-            /* timecmp_lo */
-            uint64_t timecmp_hi = env->timecmp >> 32;
-            riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
-                timecmp_hi << 32 | (value & 0xFFFFFFFF),
-                mtimer->timebase_freq);
-            return;
+            if (size == 4) {
+                /* timecmp_lo for RV32/RV64 */
+                uint64_t timecmp_hi = env->timecmp >> 32;
+                riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
+                    timecmp_hi << 32 | (value & 0xFFFFFFFF));
+            } else {
+                /* timecmp for RV64 */
+                riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
+                                                  value);
+            }
         } else if ((addr & 0x7) == 4) {
-            /* timecmp_hi */
-            uint64_t timecmp_lo = env->timecmp;
-            riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
-                value << 32 | (timecmp_lo & 0xFFFFFFFF),
-                mtimer->timebase_freq);
+            if (size == 4) {
+                /* timecmp_hi for RV32/RV64 */
+                uint64_t timecmp_lo = env->timecmp;
+                riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid,
+                    value << 32 | (timecmp_lo & 0xFFFFFFFF));
+            } else {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                              "aclint-mtimer: invalid timecmp_hi write: %08x",
+                              (uint32_t)addr);
+            }
         } else {
             qemu_log_mask(LOG_UNIMP,
                           "aclint-mtimer: invalid timecmp write: %08x",
                           (uint32_t)addr);
         }
         return;
-    } else if (addr == mtimer->time_base) {
-        /* time_lo */
-        qemu_log_mask(LOG_UNIMP,
-                      "aclint-mtimer: time_lo write not implemented");
-        return;
-    } else if (addr == mtimer->time_base + 4) {
-        /* time_hi */
-        qemu_log_mask(LOG_UNIMP,
-                      "aclint-mtimer: time_hi write not implemented");
+    } else if (addr == mtimer->time_base || addr == mtimer->time_base + 4) {
+        uint64_t rtc_r = cpu_riscv_read_rtc_raw(mtimer->timebase_freq);
+
+        if (addr == mtimer->time_base) {
+            if (size == 4) {
+                /* time_lo for RV32/RV64 */
+                mtimer->time_delta = ((rtc_r & ~0xFFFFFFFFULL) | value) - rtc_r;
+            } else {
+                /* time for RV64 */
+                mtimer->time_delta = value - rtc_r;
+            }
+        } else {
+            if (size == 4) {
+                /* time_hi for RV32/RV64 */
+                mtimer->time_delta = (value << 32 | (rtc_r & 0xFFFFFFFF)) - rtc_r;
+            } else {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                              "aclint-mtimer: invalid time_hi write: %08x",
+                              (uint32_t)addr);
+                return;
+            }
+        }
+
+        /* Check if timer interrupt is triggered for each hart. */
+        for (i = 0; i < mtimer->num_harts; i++) {
+            CPUState *cpu = qemu_get_cpu(mtimer->hartid_base + i);
+            CPURISCVState *env = cpu ? cpu->env_ptr : NULL;
+            if (!env) {
+                continue;
+            }
+            riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu),
+                                              i, env->timecmp);
+        }
         return;
     }
 
@@ -208,6 +249,10 @@ static const MemoryRegionOps riscv_aclint_mtimer_ops = {
     .valid = {
         .min_access_size = 4,
         .max_access_size = 8
+    },
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 8,
     }
 };
 
@@ -248,11 +293,29 @@ static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp)
     }
 }
 
+static void riscv_aclint_mtimer_reset_enter(Object *obj, ResetType type)
+{
+    /*
+     * According to RISC-V ACLINT spec:
+     *   - On MTIMER device reset, the MTIME register is cleared to zero.
+     *   - On MTIMER device reset, the MTIMECMP registers are in an unknown state.
+     */
+    RISCVAclintMTimerState *mtimer = RISCV_ACLINT_MTIMER(obj);
+
+    /*
+     * Clear the mtime register by writing 0 to it.
+     * Pending mtime interrupts will also be cleared at the same time.
+     */
+    riscv_aclint_mtimer_write(mtimer, mtimer->time_base, 0, 8);
+}
+
 static void riscv_aclint_mtimer_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     dc->realize = riscv_aclint_mtimer_realize;
     device_class_set_props(dc, riscv_aclint_mtimer_properties);
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+    rc->phases.enter = riscv_aclint_mtimer_reset_enter;
 }
 
 static const TypeInfo riscv_aclint_mtimer_info = {
@@ -299,7 +362,7 @@ DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size,
             continue;
         }
         if (provide_rdtime) {
-            riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, timebase_freq);
+            riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, dev);
         }
 
         cb->s = RISCV_ACLINT_MTIMER(dev);
@@ -407,11 +470,32 @@ static void riscv_aclint_swi_realize(DeviceState *dev, Error **errp)
     }
 }
 
+static void riscv_aclint_swi_reset_enter(Object *obj, ResetType type)
+{
+    /*
+     * According to RISC-V ACLINT spec:
+     *   - On MSWI device reset, each MSIP register is cleared to zero.
+     *
+     * Note: SSWI device reset does nothing since SETSSIP always reads 0.
+     */
+    RISCVAclintSwiState *swi = RISCV_ACLINT_SWI(obj);
+    int i;
+
+    if (!swi->sswi) {
+        for (i = 0; i < swi->num_harts; i++) {
+            /* Clear MSIP registers by lowering software interrupts. */
+            qemu_irq_lower(swi->soft_irqs[i]);
+        }
+    }
+}
+
 static void riscv_aclint_swi_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     dc->realize = riscv_aclint_swi_realize;
     device_class_set_props(dc, riscv_aclint_swi_properties);
+    ResettableClass *rc = RESETTABLE_CLASS(klass);
+    rc->phases.enter = riscv_aclint_swi_reset_enter;
 }
 
 static const TypeInfo riscv_aclint_swi_info = {
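
The mtime change above boils down to keeping an offset between the guest-visible
counter and QEMU's free-running virtual clock: reads add the offset, writes
recompute it. A minimal sketch of that idea, with hypothetical names and none of
the QEMU plumbing:

#include <stdint.h>
#include <time.h>

/* Illustrative only: a writable counter modelled on top of a monotonic
 * clock by storing an offset, as the time_delta field above does. */
static uint64_t host_ticks(void)           /* stand-in for the virtual clock */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static uint64_t time_delta;                /* guest mtime minus host ticks */

static uint64_t mtime_read(void)
{
    return host_ticks() + time_delta;
}

static void mtime_write(uint64_t value)
{
    /* After this, every read returns 'value' plus the ticks elapsed since. */
    time_delta = value - host_ticks();
}
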
diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c
index 0f179d3601..57a41df8e9 100644
--- a/hw/riscv/boot.c
+++ b/hw/riscv/boot.c
@@ -212,9 +212,9 @@ hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size,
     return *start + size;
 }
 
-uint32_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt)
+uint64_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt)
 {
-    uint32_t temp, fdt_addr;
+    uint64_t temp, fdt_addr;
     hwaddr dram_end = dram_base + mem_size;
     int ret, fdtsize = fdt_totalsize(fdt);
 
@@ -229,7 +229,7 @@ uint32_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt)
      * Thus, put it at a 16MB-aligned address that is less than the fdt size
      * from the end of dram or 3GB, whichever is lesser.
      */
-    temp = MIN(dram_end, 3072 * MiB);
+    temp = (dram_base < 3072 * MiB) ? MIN(dram_end, 3072 * MiB) : dram_end;
     fdt_addr = QEMU_ALIGN_DOWN(temp - fdtsize, 16 * MiB);
 
     ret = fdt_pack(fdt);
@@ -285,13 +285,15 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts
                                hwaddr start_addr,
                                hwaddr rom_base, hwaddr rom_size,
                                uint64_t kernel_entry,
-                               uint32_t fdt_load_addr, void *fdt)
+                               uint64_t fdt_load_addr, void *fdt)
 {
     int i;
     uint32_t start_addr_hi32 = 0x00000000;
+    uint32_t fdt_load_addr_hi32 = 0x00000000;
 
     if (!riscv_is_32bit(harts)) {
         start_addr_hi32 = start_addr >> 32;
+        fdt_load_addr_hi32 = fdt_load_addr >> 32;
     }
     /* reset vector */
     uint32_t reset_vec[10] = {
@@ -304,7 +306,7 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts
         start_addr,                  /* start: .dword */
         start_addr_hi32,
         fdt_load_addr,               /* fdt_laddr: .dword */
-        0x00000000,
+        fdt_load_addr_hi32,
                                      /* fw_dyn: */
     };
     if (riscv_is_32bit(harts)) {
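
For reference, the FDT placement rule that the boot.c hunk implements can be
written out on its own: cap the load address below 3 GiB only when DRAM itself
starts below 3 GiB (so 32-bit firmware can still reach it), then align down to
16 MiB. A standalone sketch under those assumptions, not the exact QEMU code:

#include <stdint.h>

#define MiB        (1024ull * 1024ull)
#define MIN(a, b)  ((a) < (b) ? (a) : (b))

/* Illustrative only: mirrors the fdt_addr computation in riscv_load_fdt(). */
static uint64_t place_fdt(uint64_t dram_base, uint64_t dram_size,
                          uint64_t fdt_size)
{
    uint64_t dram_end = dram_base + dram_size;
    uint64_t limit = (dram_base < 3072 * MiB) ? MIN(dram_end, 3072 * MiB)
                                              : dram_end;
    return (limit - fdt_size) & ~(16 * MiB - 1);   /* 16 MiB align-down */
}
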
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index 833624d66c..2d401dcb23 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -120,11 +120,18 @@ static void lowrisc_ibex_soc_init(Object *obj)
     object_initialize_child(obj, "uart", &s->uart, TYPE_IBEX_UART);
 
     object_initialize_child(obj, "timer", &s->timer, TYPE_IBEX_TIMER);
+
+    for (int i = 0; i < OPENTITAN_NUM_SPI_HOSTS; i++) {
+        object_initialize_child(obj, "spi_host[*]", &s->spi_host[i],
+                                TYPE_IBEX_SPI_HOST);
+    }
 }
 
 static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp)
 {
     const MemMapEntry *memmap = ibex_memmap;
+    DeviceState *dev;
+    SysBusDevice *busdev;
     MachineState *ms = MACHINE(qdev_get_machine());
     LowRISCIbexSoCState *s = RISCV_IBEX_SOC(dev_soc);
     MemoryRegion *sys_mem = get_system_memory();
@@ -209,14 +216,35 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp)
                           qdev_get_gpio_in(DEVICE(qemu_get_cpu(0)),
                                            IRQ_M_TIMER));
 
+    /* SPI-Hosts */
+    for (int i = 0; i < OPENTITAN_NUM_SPI_HOSTS; ++i) {
+        dev = DEVICE(&(s->spi_host[i]));
+        if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi_host[i]), errp)) {
+            return;
+        }
+        busdev = SYS_BUS_DEVICE(dev);
+        sysbus_mmio_map(busdev, 0, memmap[IBEX_DEV_SPI_HOST0 + i].base);
+
+        switch (i) {
+        case OPENTITAN_SPI_HOST0:
+            sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(&s->plic),
+                                IBEX_SPI_HOST0_ERR_IRQ));
+            sysbus_connect_irq(busdev, 1, qdev_get_gpio_in(DEVICE(&s->plic),
+                                IBEX_SPI_HOST0_SPI_EVENT_IRQ));
+            break;
+        case OPENTITAN_SPI_HOST1:
+            sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(&s->plic),
+                                IBEX_SPI_HOST1_ERR_IRQ));
+            sysbus_connect_irq(busdev, 1, qdev_get_gpio_in(DEVICE(&s->plic),
+                                IBEX_SPI_HOST1_SPI_EVENT_IRQ));
+            break;
+        }
+    }
+
     create_unimplemented_device("riscv.lowrisc.ibex.gpio",
         memmap[IBEX_DEV_GPIO].base, memmap[IBEX_DEV_GPIO].size);
     create_unimplemented_device("riscv.lowrisc.ibex.spi_device",
         memmap[IBEX_DEV_SPI_DEVICE].base, memmap[IBEX_DEV_SPI_DEVICE].size);
-    create_unimplemented_device("riscv.lowrisc.ibex.spi_host0",
-        memmap[IBEX_DEV_SPI_HOST0].base, memmap[IBEX_DEV_SPI_HOST0].size);
-    create_unimplemented_device("riscv.lowrisc.ibex.spi_host1",
-        memmap[IBEX_DEV_SPI_HOST1].base, memmap[IBEX_DEV_SPI_HOST1].size);
     create_unimplemented_device("riscv.lowrisc.ibex.i2c",
         memmap[IBEX_DEV_I2C].base, memmap[IBEX_DEV_I2C].size);
     create_unimplemented_device("riscv.lowrisc.ibex.pattgen",
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index da50cbed43..b49c5361bd 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -230,8 +230,14 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
         cpu_name = g_strdup_printf("/cpus/cpu@%d",
             s->soc[socket].hartid_base + cpu);
         qemu_fdt_add_subnode(mc->fdt, cpu_name);
-        qemu_fdt_setprop_string(mc->fdt, cpu_name, "mmu-type",
-            (is_32_bit) ? "riscv,sv32" : "riscv,sv48");
+        if (riscv_feature(&s->soc[socket].harts[cpu].env,
+                          RISCV_FEATURE_MMU)) {
+            qemu_fdt_setprop_string(mc->fdt, cpu_name, "mmu-type",
+                                    (is_32_bit) ? "riscv,sv32" : "riscv,sv48");
+        } else {
+            qemu_fdt_setprop_string(mc->fdt, cpu_name, "mmu-type",
+                                    "riscv,none");
+        }
         name = riscv_isa_string(&s->soc[socket].harts[cpu]);
         qemu_fdt_setprop_string(mc->fdt, cpu_name, "riscv,isa", name);
         g_free(name);
@@ -1308,12 +1314,18 @@ static void virt_machine_init(MachineState *machine)
 
     /*
      * Only direct boot kernel is currently supported for KVM VM,
-     * so the "-bios" parameter is ignored and treated like "-bios none"
-     * when KVM is enabled.
+     * so the "-bios" parameter is not supported when KVM is enabled.
      */
     if (kvm_enabled()) {
-        g_free(machine->firmware);
-        machine->firmware = g_strdup("none");
+        if (machine->firmware) {
+            if (strcmp(machine->firmware, "none")) {
+                error_report("Machine mode firmware is not supported in "
+                             "combination with KVM.");
+                exit(1);
+            }
+        } else {
+            machine->firmware = g_strdup("none");
+        }
     }
 
     if (riscv_is_32bit(&s->soc[0])) {
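
In practice the virt.c change above means a KVM guest must use direct kernel
boot, with either no -bios option at all or an explicit "-bios none". An
illustrative invocation (device and console options will vary):

qemu-system-riscv64 -machine virt,accel=kvm -cpu host -smp 2 -m 2G \
    -bios none -kernel Image -append "root=/dev/vda ro console=ttyS0" \
    -nographic
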
diff --git a/hw/ssi/ibex_spi_host.c b/hw/ssi/ibex_spi_host.c
new file mode 100644
index 0000000000..d14580b409
--- /dev/null
+++ b/hw/ssi/ibex_spi_host.c
@@ -0,0 +1,612 @@
+/*
+ * QEMU model of the Ibex SPI Controller
+ * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
+ *
+ * Copyright (C) 2022 Western Digital
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/ssi/ibex_spi_host.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+REG32(INTR_STATE, 0x00)
+    FIELD(INTR_STATE, ERROR, 0, 1)
+    FIELD(INTR_STATE, SPI_EVENT, 1, 1)
+REG32(INTR_ENABLE, 0x04)
+    FIELD(INTR_ENABLE, ERROR, 0, 1)
+    FIELD(INTR_ENABLE, SPI_EVENT, 1, 1)
+REG32(INTR_TEST, 0x08)
+    FIELD(INTR_TEST, ERROR, 0, 1)
+    FIELD(INTR_TEST, SPI_EVENT, 1, 1)
+REG32(ALERT_TEST, 0x0c)
+    FIELD(ALERT_TEST, FETAL_TEST, 0, 1)
+REG32(CONTROL, 0x10)
+    FIELD(CONTROL, RX_WATERMARK, 0, 8)
+    FIELD(CONTROL, TX_WATERMARK, 1, 8)
+    FIELD(CONTROL, OUTPUT_EN, 29, 1)
+    FIELD(CONTROL, SW_RST, 30, 1)
+    FIELD(CONTROL, SPIEN, 31, 1)
+REG32(STATUS, 0x14)
+    FIELD(STATUS, TXQD, 0, 8)
+    FIELD(STATUS, RXQD, 18, 8)
+    FIELD(STATUS, CMDQD, 16, 3)
+    FIELD(STATUS, RXWM, 20, 1)
+    FIELD(STATUS, BYTEORDER, 22, 1)
+    FIELD(STATUS, RXSTALL, 23, 1)
+    FIELD(STATUS, RXEMPTY, 24, 1)
+    FIELD(STATUS, RXFULL, 25, 1)
+    FIELD(STATUS, TXWM, 26, 1)
+    FIELD(STATUS, TXSTALL, 27, 1)
+    FIELD(STATUS, TXEMPTY, 28, 1)
+    FIELD(STATUS, TXFULL, 29, 1)
+    FIELD(STATUS, ACTIVE, 30, 1)
+    FIELD(STATUS, READY, 31, 1)
+REG32(CONFIGOPTS, 0x18)
+    FIELD(CONFIGOPTS, CLKDIV_0, 0, 16)
+    FIELD(CONFIGOPTS, CSNIDLE_0, 16, 4)
+    FIELD(CONFIGOPTS, CSNTRAIL_0, 20, 4)
+    FIELD(CONFIGOPTS, CSNLEAD_0, 24, 4)
+    FIELD(CONFIGOPTS, FULLCYC_0, 29, 1)
+    FIELD(CONFIGOPTS, CPHA_0, 30, 1)
+    FIELD(CONFIGOPTS, CPOL_0, 31, 1)
+REG32(CSID, 0x1c)
+    FIELD(CSID, CSID, 0, 32)
+REG32(COMMAND, 0x20)
+    FIELD(COMMAND, LEN, 0, 8)
+    FIELD(COMMAND, CSAAT, 9, 1)
+    FIELD(COMMAND, SPEED, 10, 2)
+    FIELD(COMMAND, DIRECTION, 12, 2)
+REG32(ERROR_ENABLE, 0x2c)
+    FIELD(ERROR_ENABLE, CMDBUSY, 0, 1)
+    FIELD(ERROR_ENABLE, OVERFLOW, 1, 1)
+    FIELD(ERROR_ENABLE, UNDERFLOW, 2, 1)
+    FIELD(ERROR_ENABLE, CMDINVAL, 3, 1)
+    FIELD(ERROR_ENABLE, CSIDINVAL, 4, 1)
+REG32(ERROR_STATUS, 0x30)
+    FIELD(ERROR_STATUS, CMDBUSY, 0, 1)
+    FIELD(ERROR_STATUS, OVERFLOW, 1, 1)
+    FIELD(ERROR_STATUS, UNDERFLOW, 2, 1)
+    FIELD(ERROR_STATUS, CMDINVAL, 3, 1)
+    FIELD(ERROR_STATUS, CSIDINVAL, 4, 1)
+    FIELD(ERROR_STATUS, ACCESSINVAL, 5, 1)
+REG32(EVENT_ENABLE, 0x34)
+    FIELD(EVENT_ENABLE, RXFULL, 0, 1)
+    FIELD(EVENT_ENABLE, TXEMPTY, 1, 1)
+    FIELD(EVENT_ENABLE, RXWM, 2, 1)
+    FIELD(EVENT_ENABLE, TXWM, 3, 1)
+    FIELD(EVENT_ENABLE, READY, 4, 1)
+    FIELD(EVENT_ENABLE, IDLE, 5, 1)
+
+static inline uint8_t div4_round_up(uint8_t dividend)
+{
+    return (dividend + 3) / 4;
+}
+
+static void ibex_spi_rxfifo_reset(IbexSPIHostState *s)
+{
+    /* Empty the RX FIFO and assert RXEMPTY */
+    fifo8_reset(&s->rx_fifo);
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;
+    s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
+}
+
+static void ibex_spi_txfifo_reset(IbexSPIHostState *s)
+{
+    /* Empty the TX FIFO and assert TXEMPTY */
+    fifo8_reset(&s->tx_fifo);
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXFULL_MASK;
+    s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXEMPTY_MASK;
+}
+
+static void ibex_spi_host_reset(DeviceState *dev)
+{
+    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
+    trace_ibex_spi_host_reset("Resetting Ibex SPI");
+
+    /* SPI Host Register Reset */
+    s->regs[IBEX_SPI_HOST_INTR_STATE]   = 0x00;
+    s->regs[IBEX_SPI_HOST_INTR_ENABLE]  = 0x00;
+    s->regs[IBEX_SPI_HOST_INTR_TEST]    = 0x00;
+    s->regs[IBEX_SPI_HOST_ALERT_TEST]   = 0x00;
+    s->regs[IBEX_SPI_HOST_CONTROL]      = 0x7f;
+    s->regs[IBEX_SPI_HOST_STATUS]       = 0x00;
+    s->regs[IBEX_SPI_HOST_CONFIGOPTS]   = 0x00;
+    s->regs[IBEX_SPI_HOST_CSID]         = 0x00;
+    s->regs[IBEX_SPI_HOST_COMMAND]      = 0x00;
+    /* RX/TX Modelled by FIFO */
+    s->regs[IBEX_SPI_HOST_RXDATA]       = 0x00;
+    s->regs[IBEX_SPI_HOST_TXDATA]       = 0x00;
+
+    s->regs[IBEX_SPI_HOST_ERROR_ENABLE] = 0x1F;
+    s->regs[IBEX_SPI_HOST_ERROR_STATUS] = 0x00;
+    s->regs[IBEX_SPI_HOST_EVENT_ENABLE] = 0x00;
+
+    ibex_spi_rxfifo_reset(s);
+    ibex_spi_txfifo_reset(s);
+
+    s->init_status = true;
+    return;
+}
+
+/*
+ * Check if we need to trigger an interrupt.
+ * The two interrupt lines (host_err and event) can
+ * be enabled separately in 'IBEX_SPI_HOST_INTR_ENABLE'.
+ *
+ * Interrupts are triggered based on the errors enabled in
+ * `IBEX_SPI_HOST_ERROR_ENABLE` and the events enabled in `IBEX_SPI_HOST_EVENT_ENABLE`.
+ */
+static void ibex_spi_host_irq(IbexSPIHostState *s)
+{
+    bool error_en = s->regs[IBEX_SPI_HOST_INTR_ENABLE]
+                    & R_INTR_ENABLE_ERROR_MASK;
+    bool event_en = s->regs[IBEX_SPI_HOST_INTR_ENABLE]
+                    & R_INTR_ENABLE_SPI_EVENT_MASK;
+    bool err_pending = s->regs[IBEX_SPI_HOST_INTR_STATE]
+                        & R_INTR_STATE_ERROR_MASK;
+    bool status_pending = s->regs[IBEX_SPI_HOST_INTR_STATE]
+                        & R_INTR_STATE_SPI_EVENT_MASK;
+    int err_irq = 0, event_irq = 0;
+
+    /* Error IRQ enabled and Error IRQ cleared */
+    if (error_en && !err_pending) {
+        /* Event enabled, Interrupt Test Error */
+        if (s->regs[IBEX_SPI_HOST_INTR_TEST] & R_INTR_TEST_ERROR_MASK) {
+            err_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
+                    &  R_ERROR_ENABLE_CMDBUSY_MASK) &&
+                    s->regs[IBEX_SPI_HOST_ERROR_STATUS]
+                    & R_ERROR_STATUS_CMDBUSY_MASK) {
+            /* Wrote to COMMAND when not READY */
+            err_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
+                    &  R_ERROR_ENABLE_CMDINVAL_MASK) &&
+                    s->regs[IBEX_SPI_HOST_ERROR_STATUS]
+                    & R_ERROR_STATUS_CMDINVAL_MASK) {
+            /* Invalid command segment */
+            err_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
+                    & R_ERROR_ENABLE_CSIDINVAL_MASK) &&
+                    s->regs[IBEX_SPI_HOST_ERROR_STATUS]
+                    & R_ERROR_STATUS_CSIDINVAL_MASK) {
+            /* Invalid value for CSID */
+            err_irq = 1;
+        }
+        if (err_irq) {
+            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
+        }
+        qemu_set_irq(s->host_err, err_irq);
+    }
+
+    /* Event IRQ Enabled and Event IRQ Cleared */
+    if (event_en && !status_pending) {
+        if (s->regs[IBEX_SPI_HOST_INTR_TEST] & R_INTR_TEST_SPI_EVENT_MASK) {
+            /* Event enabled, Interrupt Test Event */
+            event_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
+                    & R_EVENT_ENABLE_READY_MASK) &&
+                    (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_READY_MASK)) {
+            /* SPI Host ready for next command */
+            event_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
+                    & R_EVENT_ENABLE_TXEMPTY_MASK) &&
+                    (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_TXEMPTY_MASK)) {
+            /* SPI TXEMPTY, TXFIFO drained */
+            event_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
+                    & R_EVENT_ENABLE_RXFULL_MASK) &&
+                    (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_RXFULL_MASK)) {
+            /* SPI RXFULL, RXFIFO full */
+            event_irq = 1;
+        }
+        if (event_irq) {
+            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
+        }
+        qemu_set_irq(s->event, event_irq);
+    }
+}
+
+static void ibex_spi_host_transfer(IbexSPIHostState *s)
+{
+    uint32_t rx, tx;
+    /* Get num of one byte transfers */
+    uint8_t segment_len = ((s->regs[IBEX_SPI_HOST_COMMAND] & R_COMMAND_LEN_MASK)
+                          >> R_COMMAND_LEN_SHIFT);
+    while (segment_len > 0) {
+        if (fifo8_is_empty(&s->tx_fifo)) {
+            /* Assert Stall */
+            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXSTALL_MASK;
+            break;
+        } else if (fifo8_is_full(&s->rx_fifo)) {
+            /* Assert Stall */
+            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXSTALL_MASK;
+            break;
+        } else {
+            tx = fifo8_pop(&s->tx_fifo);
+        }
+
+        rx = ssi_transfer(s->ssi, tx);
+
+        trace_ibex_spi_host_transfer(tx, rx);
+
+        if (!fifo8_is_full(&s->rx_fifo)) {
+            fifo8_push(&s->rx_fifo, rx);
+        } else {
+            /* Assert RXFULL */
+            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXFULL_MASK;
+        }
+        --segment_len;
+    }
+
+    /* Assert Ready */
+    s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_READY_MASK;
+    /* Set RXQD */
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXQD_MASK;
+    s->regs[IBEX_SPI_HOST_STATUS] |= (R_STATUS_RXQD_MASK
+                                    & div4_round_up(segment_len));
+    /* Set TXQD */
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXQD_MASK;
+    s->regs[IBEX_SPI_HOST_STATUS] |= (fifo8_num_used(&s->tx_fifo) / 4)
+                                    & R_STATUS_TXQD_MASK;
+    /* Clear TXFULL */
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXFULL_MASK;
+    /* Assert TXEMPTY and drop remaining bytes that exceed segment_len */
+    ibex_spi_txfifo_reset(s);
+    /* Reset RXEMPTY */
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXEMPTY_MASK;
+
+    ibex_spi_host_irq(s);
+}
+
+static uint64_t ibex_spi_host_read(void *opaque, hwaddr addr,
+                                     unsigned int size)
+{
+    IbexSPIHostState *s = opaque;
+    uint32_t rc = 0;
+    uint8_t rx_byte = 0;
+
+    trace_ibex_spi_host_read(addr, size);
+
+    /* Match reg index */
+    addr = addr >> 2;
+    switch (addr) {
+    /* Skipping any W/O registers */
+    case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
+    case IBEX_SPI_HOST_CONTROL...IBEX_SPI_HOST_STATUS:
+        rc = s->regs[addr];
+        break;
+    case IBEX_SPI_HOST_CSID:
+        rc = s->regs[addr];
+        break;
+    case IBEX_SPI_HOST_CONFIGOPTS:
+        rc = s->config_opts[s->regs[IBEX_SPI_HOST_CSID]];
+        break;
+    case IBEX_SPI_HOST_TXDATA:
+        rc = s->regs[addr];
+        break;
+    case IBEX_SPI_HOST_RXDATA:
+        /* Clear RXFULL */
+        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;
+
+        for (int i = 0; i < 4; ++i) {
+            if (fifo8_is_empty(&s->rx_fifo)) {
+                /* Assert RXEMPTY, no IRQ */
+                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
+                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
+                                                R_ERROR_STATUS_UNDERFLOW_MASK;
+                return rc;
+            }
+            rx_byte = fifo8_pop(&s->rx_fifo);
+            rc |= rx_byte << (i * 8);
+        }
+        break;
+    case IBEX_SPI_HOST_ERROR_ENABLE...IBEX_SPI_HOST_EVENT_ENABLE:
+        rc = s->regs[addr];
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
+                      addr << 2);
+    }
+    return rc;
+}
+
+
+static void ibex_spi_host_write(void *opaque, hwaddr addr,
+                                uint64_t val64, unsigned int size)
+{
+    IbexSPIHostState *s = opaque;
+    uint32_t val32 = val64;
+    uint32_t shift_mask = 0xff;
+    uint8_t txqd_len;
+
+    trace_ibex_spi_host_write(addr, size, val64);
+
+    /* Match reg index */
+    addr = addr >> 2;
+
+    switch (addr) {
+    /* Skipping any R/O registers */
+    case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
+        s->regs[addr] = val32;
+        break;
+    case IBEX_SPI_HOST_INTR_TEST:
+        s->regs[addr] = val32;
+        ibex_spi_host_irq(s);
+        break;
+    case IBEX_SPI_HOST_ALERT_TEST:
+        s->regs[addr] = val32;
+        qemu_log_mask(LOG_UNIMP,
+                        "%s: SPI_ALERT_TEST is not supported\n", __func__);
+        break;
+    case IBEX_SPI_HOST_CONTROL:
+        s->regs[addr] = val32;
+
+        if (val32 & R_CONTROL_SW_RST_MASK)  {
+            ibex_spi_host_reset((DeviceState *)s);
+            /* Clear active if any */
+            s->regs[IBEX_SPI_HOST_STATUS] &=  ~R_STATUS_ACTIVE_MASK;
+        }
+
+        if (val32 & R_CONTROL_OUTPUT_EN_MASK)  {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: CONTROL_OUTPUT_EN is not supported\n", __func__);
+        }
+        break;
+    case IBEX_SPI_HOST_CONFIGOPTS:
+        /* Update the config-opts register selected by the current CSID */
+        s->config_opts[s->regs[IBEX_SPI_HOST_CSID]] = val32;
+        qemu_log_mask(LOG_UNIMP,
+                      "%s: CONFIGOPTS Hardware settings not supported\n",
+                         __func__);
+        break;
+    case IBEX_SPI_HOST_CSID:
+        if (val32 >= s->num_cs) {
+            /* CSID exceeds max num_cs */
+            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
+                                                R_ERROR_STATUS_CSIDINVAL_MASK;
+            ibex_spi_host_irq(s);
+            return;
+        }
+        s->regs[addr] = val32;
+        break;
+    case IBEX_SPI_HOST_COMMAND:
+        s->regs[addr] = val32;
+
+        /* STALL, IP not enabled */
+        if (!(s->regs[IBEX_SPI_HOST_CONTROL] & R_CONTROL_SPIEN_MASK)) {
+            return;
+        }
+
+        /* SPI not ready, IRQ Error */
+        if (!(s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_READY_MASK)) {
+            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= R_ERROR_STATUS_CMDBUSY_MASK;
+            ibex_spi_host_irq(s);
+            return;
+        }
+        /* Assert Not Ready */
+        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_READY_MASK;
+
+        if (((val32 & R_COMMAND_DIRECTION_MASK) >> R_COMMAND_DIRECTION_SHIFT)
+            != BIDIRECTIONAL_TRANSFER) {
+                qemu_log_mask(LOG_UNIMP,
+                          "%s: Rx Only/Tx Only are not supported\n", __func__);
+        }
+
+        if (val32 & R_COMMAND_CSAAT_MASK)  {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: CSAAT is not supported\n", __func__);
+        }
+        if (val32 & R_COMMAND_SPEED_MASK)  {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: SPEED is not supported\n", __func__);
+        }
+
+        /* Set Transfer Callback */
+        timer_mod(s->fifo_trigger_handle,
+                    qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+                    (TX_INTERRUPT_TRIGGER_DELAY_NS));
+
+        break;
+    case IBEX_SPI_HOST_TXDATA:
+        /*
+         * This is a hardware `feature` where the first word
+         * written to TXDATA after init is omitted entirely.
+         */
+        if (s->init_status) {
+            s->init_status = false;
+            return;
+        }
+
+        for (int i = 0; i < 4; ++i) {
+            /* Attempting to write when TXFULL */
+            if (fifo8_is_full(&s->tx_fifo)) {
+                /* Assert TXFULL and flag an overflow error */
+                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXFULL_MASK;
+                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
+                                                 R_ERROR_STATUS_OVERFLOW_MASK;
+                ibex_spi_host_irq(s);
+                return;
+            }
+            /* Byte ordering is set by the IP */
+            if ((s->regs[IBEX_SPI_HOST_STATUS] &
+                R_STATUS_BYTEORDER_MASK) == 0) {
+                /* LE: LSB transmitted first (default for ibex processor) */
+                shift_mask = 0xff << (i * 8);
+            } else {
+                /* BE: MSB transmitted first */
+                qemu_log_mask(LOG_UNIMP,
+                             "%s: Big endian is not supported\n", __func__);
+            }
+
+            fifo8_push(&s->tx_fifo, (val32 & shift_mask) >> (i * 8));
+        }
+
+        /* Reset TXEMPTY */
+        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXEMPTY_MASK;
+        /* Update TXQD */
+        txqd_len = (s->regs[IBEX_SPI_HOST_STATUS] &
+                    R_STATUS_TXQD_MASK) >> R_STATUS_TXQD_SHIFT;
+        /* TXQD counts words; partial writes (size < 4) are padded to a word. */
+        txqd_len += 1;
+        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXQD_MASK;
+        s->regs[IBEX_SPI_HOST_STATUS] |= txqd_len;
+        /* Assert Ready */
+        s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_READY_MASK;
+        break;
+    case IBEX_SPI_HOST_ERROR_ENABLE:
+        s->regs[addr] = val32;
+
+        if (val32 & R_ERROR_ENABLE_CMDINVAL_MASK)  {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: Segment Length is not supported\n", __func__);
+        }
+        break;
+    case IBEX_SPI_HOST_ERROR_STATUS:
+    /*
+     * Indicates any errors that have occurred.
+     * When an error occurs, the corresponding bit must be cleared
+     * here before issuing any further commands.
+     */
+        s->regs[addr] = val32;
+        break;
+    case IBEX_SPI_HOST_EVENT_ENABLE:
+    /* Controls which classes of SPI events raise an interrupt. */
+        s->regs[addr] = val32;
+
+        if (val32 & R_EVENT_ENABLE_RXWM_MASK)  {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: RXWM is not supported\n", __func__);
+        }
+        if (val32 & R_EVENT_ENABLE_TXWM_MASK)  {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: TXWM is not supported\n", __func__);
+        }
+
+        if (val32 & R_EVENT_ENABLE_IDLE_MASK)  {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: IDLE is not supported\n", __func__);
+        }
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
+                      addr << 2);
+    }
+}
+
+static const MemoryRegionOps ibex_spi_ops = {
+    .read = ibex_spi_host_read,
+    .write = ibex_spi_host_write,
+    /* Ibex default LE */
+    .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static Property ibex_spi_properties[] = {
+    DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vmstate_ibex = {
+    .name = TYPE_IBEX_SPI_HOST,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS),
+        VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState,
+                              num_cs, 0, vmstate_info_uint32, uint32_t),
+        VMSTATE_FIFO8(rx_fifo, IbexSPIHostState),
+        VMSTATE_FIFO8(tx_fifo, IbexSPIHostState),
+        VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexSPIHostState),
+        VMSTATE_BOOL(init_status, IbexSPIHostState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void fifo_trigger_update(void *opaque)
+{
+    IbexSPIHostState *s = opaque;
+    ibex_spi_host_transfer(s);
+}
+
+static void ibex_spi_host_realize(DeviceState *dev, Error **errp)
+{
+    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
+    int i;
+
+    s->ssi = ssi_create_bus(dev, "ssi");
+    s->cs_lines = g_new0(qemu_irq, s->num_cs);
+
+    for (i = 0; i < s->num_cs; ++i) {
+        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
+    }
+
+    /* Setup CONFIGOPTS Multi-register */
+    s->config_opts = g_new0(uint32_t, s->num_cs);
+
+    /* Setup FIFO Interrupt Timer */
+    s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                          fifo_trigger_update, s);
+
+    /* FIFO sizes as per OT Spec */
+    fifo8_create(&s->tx_fifo, IBEX_SPI_HOST_TXFIFO_LEN);
+    fifo8_create(&s->rx_fifo, IBEX_SPI_HOST_RXFIFO_LEN);
+}
+
+static void ibex_spi_host_init(Object *obj)
+{
+    IbexSPIHostState *s = IBEX_SPI_HOST(obj);
+
+    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->host_err);
+    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->event);
+
+    memory_region_init_io(&s->mmio, obj, &ibex_spi_ops, s,
+                          TYPE_IBEX_SPI_HOST, 0x1000);
+    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void ibex_spi_host_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    dc->realize = ibex_spi_host_realize;
+    dc->reset = ibex_spi_host_reset;
+    dc->vmsd = &vmstate_ibex;
+    device_class_set_props(dc, ibex_spi_properties);
+}
+
+static const TypeInfo ibex_spi_host_info = {
+    .name          = TYPE_IBEX_SPI_HOST,
+    .parent        = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(IbexSPIHostState),
+    .instance_init = ibex_spi_host_init,
+    .class_init    = ibex_spi_host_class_init,
+};
+
+static void ibex_spi_host_register_types(void)
+{
+    type_register_static(&ibex_spi_host_info);
+}
+
+type_init(ibex_spi_host_register_types)
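
As a rough picture of how a guest drives this model: enable the controller,
queue a word in TXDATA, issue a COMMAND segment, poll STATUS.READY, then pull
the received bytes out of RXDATA. The sketch below is illustrative only; the
MMIO base and the RXDATA/TXDATA offsets are assumptions (they are not declared
via REG32 in this file), and the bidirectional DIRECTION encoding of 0x3 is
taken from the OpenTitan documentation rather than from this patch.

#include <stdint.h>

#define SPI_HOST0_BASE  0x40300000u            /* assumed MMIO base */
#define SPI_REG(off)    (*(volatile uint32_t *)(SPI_HOST0_BASE + (off)))

#define SPI_CONTROL     0x10
#define SPI_STATUS      0x14
#define SPI_CSID        0x1c
#define SPI_COMMAND     0x20
#define SPI_RXDATA      0x24                   /* assumed offset */
#define SPI_TXDATA      0x28                   /* assumed offset */

static uint32_t spi_xfer_word(uint32_t tx)
{
    SPI_REG(SPI_CONTROL) = 1u << 31;           /* CONTROL.SPIEN */
    SPI_REG(SPI_CSID)    = 0;
    /* Note: the model drops the very first word written to TXDATA after
     * reset (see init_status above). */
    SPI_REG(SPI_TXDATA)  = tx;
    /* LEN = 4 one-byte transfers, DIRECTION = bidirectional (assumed 0x3). */
    SPI_REG(SPI_COMMAND) = (0x3u << 12) | 4;
    while (!(SPI_REG(SPI_STATUS) & (1u << 31))) {
        ;                                      /* wait for STATUS.READY */
    }
    return SPI_REG(SPI_RXDATA);
}
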
diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build
index 0ded9cd092..702aa5e4df 100644
--- a/hw/ssi/meson.build
+++ b/hw/ssi/meson.build
@@ -10,3 +10,4 @@ softmmu_ss.add(when: 'CONFIG_XILINX_SPIPS', if_true: files('xilinx_spips.c'))
 softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-ospi.c'))
 softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c'))
 softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c'))
+softmmu_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_spi_host.c'))
diff --git a/hw/ssi/trace-events b/hw/ssi/trace-events
index 612d3d6087..c707d4aaba 100644
--- a/hw/ssi/trace-events
+++ b/hw/ssi/trace-events
@@ -20,3 +20,10 @@ npcm7xx_fiu_ctrl_read(const char *id, uint64_t addr, uint32_t data) "%s offset:
 npcm7xx_fiu_ctrl_write(const char *id, uint64_t addr, uint32_t data) "%s offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
 npcm7xx_fiu_flash_read(const char *id, int cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64
 npcm7xx_fiu_flash_write(const char *id, unsigned cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64
+
+# ibex_spi_host.c
+
+ibex_spi_host_reset(const char *msg) "%s"
+ibex_spi_host_transfer(uint32_t tx_data, uint32_t rx_data) "tx_data: 0x%" PRIx32 " rx_data: @0x%" PRIx32
+ibex_spi_host_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64
+ibex_spi_host_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size %u:"
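
These trace points can be enabled at run time in the usual way, for example
(illustrative command line):

qemu-system-riscv32 -M opentitan -nographic \
    -kernel test.elf -trace "ibex_spi_host_*"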