Diffstat (limited to 'hw')
 hw/arm/smmuv3.c             |   2
 hw/arm/virt.c               | 102
 hw/char/escc.c              |   2
 hw/intc/Kconfig             |   3
 hw/intc/arm_gicv3_common.c  |  54
 hw/intc/arm_gicv3_cpuif.c   | 195
 hw/intc/arm_gicv3_dist.c    |   7
 hw/intc/arm_gicv3_its.c     | 776
 hw/intc/arm_gicv3_its_kvm.c |   2
 hw/intc/arm_gicv3_kvm.c     |   5
 hw/intc/arm_gicv3_redist.c  | 480
 hw/intc/gicv3_internal.h    | 213
 hw/intc/meson.build         |   1
 hw/intc/nios2_vic.c         | 313
 hw/intc/riscv_aclint.c      | 144
 hw/intc/trace-events        |  18
 hw/nios2/10m50_devboard.c   | 115
 hw/nios2/Kconfig            |   1
 hw/rdma/vmw/pvrdma_main.c   |   9
 hw/riscv/boot.c             |  12
 hw/riscv/opentitan.c        |  36
 hw/riscv/virt.c             |  24
 hw/ssi/ibex_spi_host.c      | 612
 hw/ssi/meson.build          |   1
 hw/ssi/trace-events         |   7
 hw/virtio/trace-events      |   2
 hw/virtio/vhost-vdpa.c      |   2
27 files changed, 2793 insertions(+), 345 deletions(-)
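As the hw/arm/virt.c hunks below show, the bulk of this batch is GICv4 emulation for the virt board: "4" becomes an accepted value of the machine's gic-version property, is TCG-only (the KVM paths reject it), and requires the CPU to have EL2 (virtualization=on). A plausible invocation exercising the new code path; flags beyond the machine properties are illustrative:

    qemu-system-aarch64 -M virt,virtualization=on,gic-version=4 \
        -cpu max -accel tcg ...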
diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 674623aabe..707eb430c2 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -760,7 +760,7 @@ epilogue: qemu_mutex_unlock(&s->mutex); switch (status) { case SMMU_TRANS_SUCCESS: - entry.perm = flag; + entry.perm = cached_entry->entry.perm; entry.translated_addr = cached_entry->entry.translated_addr + (addr & cached_entry->entry.addr_mask); entry.addr_mask = cached_entry->entry.addr_mask; diff --git a/hw/arm/virt.c b/hw/arm/virt.c index bb6a2484d8..5bdd98e4a1 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -522,7 +522,7 @@ static void fdt_add_gic_node(VirtMachineState *vms) qemu_fdt_setprop_cell(ms->fdt, nodename, "#address-cells", 0x2); qemu_fdt_setprop_cell(ms->fdt, nodename, "#size-cells", 0x2); qemu_fdt_setprop(ms->fdt, nodename, "ranges", NULL, 0); - if (vms->gic_version == VIRT_GIC_VERSION_3) { + if (vms->gic_version != VIRT_GIC_VERSION_2) { int nb_redist_regions = virt_gicv3_redist_region_count(vms); qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", @@ -690,14 +690,32 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) /* We create a standalone GIC */ SysBusDevice *gicbusdev; const char *gictype; - int type = vms->gic_version, i; + int i; unsigned int smp_cpus = ms->smp.cpus; uint32_t nb_redist_regions = 0; + int revision; - gictype = (type == 3) ? gicv3_class_name() : gic_class_name(); + if (vms->gic_version == VIRT_GIC_VERSION_2) { + gictype = gic_class_name(); + } else { + gictype = gicv3_class_name(); + } + switch (vms->gic_version) { + case VIRT_GIC_VERSION_2: + revision = 2; + break; + case VIRT_GIC_VERSION_3: + revision = 3; + break; + case VIRT_GIC_VERSION_4: + revision = 4; + break; + default: + g_assert_not_reached(); + } vms->gic = qdev_new(gictype); - qdev_prop_set_uint32(vms->gic, "revision", type); + qdev_prop_set_uint32(vms->gic, "revision", revision); qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus); /* Note that the num-irq property counts both internal and external * interrupts; there are always 32 of the former (mandated by GIC spec). 
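The pattern repeated through the rest of these virt.c hunks: every former type == 3 check becomes a comparison against VIRT_GIC_VERSION_2, so GICv4 automatically shares the GICv3 code paths (device tree node, redistributor regions, ITS creation, and so on). A minimal sketch of the predicate being open-coded at each site; the helper name is hypothetical, the patch writes the comparison inline:

    /* GICv3 and GICv4 are both handled by the v3-class device model;
     * only GICv2 takes a different path. */
    static inline bool vms_gic_is_v3_class(VirtMachineState *vms)
    {
        return vms->gic_version != VIRT_GIC_VERSION_2;
    }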
@@ -707,9 +725,8 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) qdev_prop_set_bit(vms->gic, "has-security-extensions", vms->secure); } - if (type == 3) { - uint32_t redist0_capacity = - vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE; + if (vms->gic_version != VIRT_GIC_VERSION_2) { + uint32_t redist0_capacity = virt_redist_capacity(vms, VIRT_GIC_REDIST); uint32_t redist0_count = MIN(smp_cpus, redist0_capacity); nb_redist_regions = virt_gicv3_redist_region_count(vms); @@ -728,7 +745,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) if (nb_redist_regions == 2) { uint32_t redist1_capacity = - vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE; + virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2); qdev_prop_set_uint32(vms->gic, "redist-region-count[1]", MIN(smp_cpus - redist0_count, redist1_capacity)); @@ -742,7 +759,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) gicbusdev = SYS_BUS_DEVICE(vms->gic); sysbus_realize_and_unref(gicbusdev, &error_fatal); sysbus_mmio_map(gicbusdev, 0, vms->memmap[VIRT_GIC_DIST].base); - if (type == 3) { + if (vms->gic_version != VIRT_GIC_VERSION_2) { sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_REDIST].base); if (nb_redist_regions == 2) { sysbus_mmio_map(gicbusdev, 2, @@ -780,7 +797,7 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) ppibase + timer_irq[irq])); } - if (type == 3) { + if (vms->gic_version != VIRT_GIC_VERSION_2) { qemu_irq irq = qdev_get_gpio_in(vms->gic, ppibase + ARCH_GIC_MAINT_IRQ); qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", @@ -806,9 +823,9 @@ static void create_gic(VirtMachineState *vms, MemoryRegion *mem) fdt_add_gic_node(vms); - if (type == 3 && vms->its) { + if (vms->gic_version != VIRT_GIC_VERSION_2 && vms->its) { create_its(vms); - } else if (type == 2) { + } else if (vms->gic_version == VIRT_GIC_VERSION_2) { create_v2m(vms); } } @@ -1658,10 +1675,10 @@ static uint64_t virt_cpu_mp_affinity(VirtMachineState *vms, int idx) * purposes are to make TCG consistent (with 64-bit KVM hosts) * and to improve SGI efficiency. 
*/ - if (vms->gic_version == VIRT_GIC_VERSION_3) { - clustersz = GICV3_TARGETLIST_BITS; - } else { + if (vms->gic_version == VIRT_GIC_VERSION_2) { clustersz = GIC_TARGETLIST_BITS; + } else { + clustersz = GICV3_TARGETLIST_BITS; } } return arm_cpu_mp_affinity(idx, clustersz); @@ -1794,6 +1811,10 @@ static void finalize_gic_version(VirtMachineState *vms) error_report( "gic-version=3 is not supported with kernel-irqchip=off"); exit(1); + case VIRT_GIC_VERSION_4: + error_report( + "gic-version=4 is not supported with kernel-irqchip=off"); + exit(1); } } @@ -1831,6 +1852,9 @@ static void finalize_gic_version(VirtMachineState *vms) case VIRT_GIC_VERSION_2: case VIRT_GIC_VERSION_3: break; + case VIRT_GIC_VERSION_4: + error_report("gic-version=4 is not supported with KVM"); + exit(1); } /* Check chosen version is effectively supported by the host */ @@ -1854,7 +1878,12 @@ static void finalize_gic_version(VirtMachineState *vms) case VIRT_GIC_VERSION_MAX: if (module_object_class_by_name("arm-gicv3")) { /* CONFIG_ARM_GICV3_TCG was set */ - vms->gic_version = VIRT_GIC_VERSION_3; + if (vms->virt) { + /* GICv4 only makes sense if CPU has EL2 */ + vms->gic_version = VIRT_GIC_VERSION_4; + } else { + vms->gic_version = VIRT_GIC_VERSION_3; + } } else { vms->gic_version = VIRT_GIC_VERSION_2; } @@ -1862,6 +1891,12 @@ static void finalize_gic_version(VirtMachineState *vms) case VIRT_GIC_VERSION_HOST: error_report("gic-version=host requires KVM"); exit(1); + case VIRT_GIC_VERSION_4: + if (!vms->virt) { + error_report("gic-version=4 requires virtualization enabled"); + exit(1); + } + break; case VIRT_GIC_VERSION_2: case VIRT_GIC_VERSION_3: break; @@ -2029,16 +2064,16 @@ static void machvirt_init(MachineState *machine) vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC; } - /* The maximum number of CPUs depends on the GIC version, or on how - * many redistributors we can fit into the memory map. + /* + * The maximum number of CPUs depends on the GIC version, or on how + * many redistributors we can fit into the memory map (which in turn + * depends on whether this is a GICv3 or v4). */ - if (vms->gic_version == VIRT_GIC_VERSION_3) { - virt_max_cpus = - vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE; - virt_max_cpus += - vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE; - } else { + if (vms->gic_version == VIRT_GIC_VERSION_2) { virt_max_cpus = GIC_NCPU; + } else { + virt_max_cpus = virt_redist_capacity(vms, VIRT_GIC_REDIST) + + virt_redist_capacity(vms, VIRT_HIGH_GIC_REDIST2); } if (max_cpus > virt_max_cpus) { @@ -2426,8 +2461,19 @@ static void virt_set_mte(Object *obj, bool value, Error **errp) static char *virt_get_gic_version(Object *obj, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); - const char *val = vms->gic_version == VIRT_GIC_VERSION_3 ? 
"3" : "2"; + const char *val; + switch (vms->gic_version) { + case VIRT_GIC_VERSION_4: + val = "4"; + break; + case VIRT_GIC_VERSION_3: + val = "3"; + break; + default: + val = "2"; + break; + } return g_strdup(val); } @@ -2435,7 +2481,9 @@ static void virt_set_gic_version(Object *obj, const char *value, Error **errp) { VirtMachineState *vms = VIRT_MACHINE(obj); - if (!strcmp(value, "3")) { + if (!strcmp(value, "4")) { + vms->gic_version = VIRT_GIC_VERSION_4; + } else if (!strcmp(value, "3")) { vms->gic_version = VIRT_GIC_VERSION_3; } else if (!strcmp(value, "2")) { vms->gic_version = VIRT_GIC_VERSION_2; @@ -2893,7 +2941,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) virt_set_gic_version); object_class_property_set_description(oc, "gic-version", "Set GIC version. " - "Valid values are 2, 3, host and max"); + "Valid values are 2, 3, 4, host and max"); object_class_property_add_str(oc, "iommu", virt_get_iommu, virt_set_iommu); object_class_property_set_description(oc, "iommu", diff --git a/hw/char/escc.c b/hw/char/escc.c index 8755d8d34f..17a908c59b 100644 --- a/hw/char/escc.c +++ b/hw/char/escc.c @@ -828,7 +828,7 @@ static void sunkbd_handle_event(DeviceState *dev, QemuConsole *src, } } - if (qcode > qemu_input_map_qcode_to_sun_len) { + if (qcode >= qemu_input_map_qcode_to_sun_len) { return; } diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig index a7cf301eab..eded1b557e 100644 --- a/hw/intc/Kconfig +++ b/hw/intc/Kconfig @@ -84,3 +84,6 @@ config GOLDFISH_PIC config M68K_IRQC bool + +config NIOS2_VIC + bool diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index 4ca5ae9bc5..5634c6fc78 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -144,6 +144,25 @@ const VMStateDescription vmstate_gicv3_cpu_sre_el1 = { } }; +static bool gicv4_needed(void *opaque) +{ + GICv3CPUState *cs = opaque; + + return cs->gic->revision > 3; +} + +const VMStateDescription vmstate_gicv3_gicv4 = { + .name = "arm_gicv3_cpu/gicv4", + .version_id = 1, + .minimum_version_id = 1, + .needed = gicv4_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(gicr_vpropbaser, GICv3CPUState), + VMSTATE_UINT64(gicr_vpendbaser, GICv3CPUState), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_gicv3_cpu = { .name = "arm_gicv3_cpu", .version_id = 1, @@ -175,6 +194,7 @@ static const VMStateDescription vmstate_gicv3_cpu = { .subsections = (const VMStateDescription * []) { &vmstate_gicv3_cpu_virt, &vmstate_gicv3_cpu_sre_el1, + &vmstate_gicv3_gicv4, NULL } }; @@ -295,7 +315,7 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler, memory_region_init_io(®ion->iomem, OBJECT(s), ops ? &ops[1] : NULL, region, name, - s->redist_region_count[i] * GICV3_REDIST_SIZE); + s->redist_region_count[i] * gicv3_redist_size(s)); sysbus_init_mmio(sbd, ®ion->iomem); g_free(name); } @@ -306,12 +326,14 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) GICv3State *s = ARM_GICV3_COMMON(dev); int i, rdist_capacity, cpuidx; - /* revision property is actually reserved and currently used only in order - * to keep the interface compatible with GICv2 code, avoiding extra - * conditions. However, in future it could be used, for example, if we - * implement GICv4. + /* + * This GIC device supports only revisions 3 and 4. The GICv1/v2 + * is a separate device. + * Note that subclasses of this device may impose further restrictions + * on the GIC revision: notably, the in-kernel KVM GIC doesn't + * support GICv4. 
*/ - if (s->revision != 3) { + if (s->revision != 3 && s->revision != 4) { error_setg(errp, "unsupported GIC revision %d", s->revision); return; } @@ -328,6 +350,10 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) s->num_irq, GIC_INTERNAL); return; } + if (s->num_cpu == 0) { + error_setg(errp, "num-cpu must be at least 1"); + return; + } /* ITLinesNumber is represented as (N / 32) - 1, so this is an * implementation imposed restriction, not an architectural one, @@ -350,9 +376,9 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) for (i = 0; i < s->nb_redist_regions; i++) { rdist_capacity += s->redist_region_count[i]; } - if (rdist_capacity < s->num_cpu) { + if (rdist_capacity != s->num_cpu) { error_setg(errp, "Capacity of the redist regions(%d) " - "is less than number of vcpus(%d)", + "does not match the number of vcpus(%d)", rdist_capacity, s->num_cpu); return; } @@ -382,8 +408,8 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) * Last == 1 if this is the last redistributor in a series of * contiguous redistributor pages * DirectLPI == 0 (direct injection of LPIs not supported) - * VLPIS == 0 (virtual LPIs not supported) - * PLPIS == 0 (physical LPIs not supported) + * VLPIS == 1 if vLPIs supported (GICv4 and up) + * PLPIS == 1 if LPIs supported */ cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL); @@ -398,6 +424,9 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) if (s->lpi_enable) { s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS; + if (s->revision > 3) { + s->cpu[i].gicr_typer |= GICR_TYPER_VLPIS; + } } } @@ -410,6 +439,8 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) cpuidx += s->redist_region_count[i]; s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST; } + + s->itslist = g_ptr_array_new(); } static void arm_gicv3_finalize(Object *obj) @@ -438,6 +469,8 @@ static void arm_gicv3_common_reset(DeviceState *dev) cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep; cs->gicr_propbaser = 0; cs->gicr_pendbaser = 0; + cs->gicr_vpropbaser = 0; + cs->gicr_vpendbaser = 0; /* If we're resetting a TZ-aware GIC as if secure firmware * had set it up ready to start a kernel in non-secure, we * need to set interrupts to group 1 so the kernel can use them. @@ -459,6 +492,7 @@ static void arm_gicv3_common_reset(DeviceState *dev) cs->hppi.prio = 0xff; cs->hpplpi.prio = 0xff; + cs->hppvlpi.prio = 0xff; /* State in the CPU interface must *not* be reset here, because it * is part of the CPU's reset domain, not the GIC device's. diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index 1a3d440a54..8404f46ee0 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -21,6 +21,12 @@ #include "hw/irq.h" #include "cpu.h" +/* + * Special case return value from hppvi_index(); must be larger than + * the architecturally maximum possible list register index (which is 15) + */ +#define HPPVI_INDEX_VLPI 16 + static GICv3CPUState *icc_cs_from_env(CPUARMState *env) { return env->gicv3state; @@ -157,10 +163,18 @@ static int ich_highest_active_virt_prio(GICv3CPUState *cs) static int hppvi_index(GICv3CPUState *cs) { - /* Return the list register index of the highest priority pending + /* + * Return the list register index of the highest priority pending * virtual interrupt, as per the HighestPriorityVirtualInterrupt * pseudocode. If no pending virtual interrupts, return -1. 
+ * If the highest priority pending virtual interrupt is a vLPI, + * return HPPVI_INDEX_VLPI. + * (The pseudocode handles checking whether the vLPI is higher + * priority than the highest priority list register at every + * callsite of HighestPriorityVirtualInterrupt; we check it here.) */ + ARMCPU *cpu = ARM_CPU(cs->cpu); + CPUARMState *env = &cpu->env; int idx = -1; int i; /* Note that a list register entry with a priority of 0xff will @@ -202,6 +216,23 @@ static int hppvi_index(GICv3CPUState *cs) } } + /* + * "no pending vLPI" is indicated with prio = 0xff, which always + * fails the priority check here. vLPIs are only considered + * when we are in Non-Secure state. + */ + if (cs->hppvlpi.prio < prio && !arm_is_secure(env)) { + if (cs->hppvlpi.grp == GICV3_G0) { + if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG0) { + return HPPVI_INDEX_VLPI; + } + } else { + if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VENG1) { + return HPPVI_INDEX_VLPI; + } + } + } + return idx; } @@ -289,6 +320,47 @@ static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr) return false; } +static bool icv_hppvlpi_can_preempt(GICv3CPUState *cs) +{ + /* + * Return true if we can signal the highest priority pending vLPI. + * We can assume we're Non-secure because hppvi_index() already + * tested for that. + */ + uint32_t mask, rprio, vpmr; + + if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) { + /* Virtual interface disabled */ + return false; + } + + vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, + ICH_VMCR_EL2_VPMR_LENGTH); + + if (cs->hppvlpi.prio >= vpmr) { + /* Priority mask masks this interrupt */ + return false; + } + + rprio = ich_highest_active_virt_prio(cs); + if (rprio == 0xff) { + /* No running interrupt so we can preempt */ + return true; + } + + mask = icv_gprio_mask(cs, cs->hppvlpi.grp); + + /* + * We only preempt a running interrupt if the pending interrupt's + * group priority is sufficient (the subpriorities are not considered). + */ + if ((cs->hppvlpi.prio & mask) < (rprio & mask)) { + return true; + } + + return false; +} + static uint32_t eoi_maintenance_interrupt_state(GICv3CPUState *cs, uint32_t *misr) { @@ -370,9 +442,55 @@ static uint32_t maintenance_interrupt_state(GICv3CPUState *cs) return value; } +void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs) +{ + /* + * Tell the CPU about any pending virtual interrupts. + * This should only be called for changes that affect the + * vIRQ and vFIQ status and do not change the maintenance + * interrupt status. This means that unlike gicv3_cpuif_virt_update() + * this function won't recursively call back into the GIC code. + * The main use of this is when the redistributor has changed the + * highest priority pending virtual LPI. 
+ */ + int idx; + int irqlevel = 0; + int fiqlevel = 0; + + idx = hppvi_index(cs); + trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx, + cs->hppvlpi.irq, cs->hppvlpi.grp, + cs->hppvlpi.prio); + if (idx == HPPVI_INDEX_VLPI) { + if (icv_hppvlpi_can_preempt(cs)) { + if (cs->hppvlpi.grp == GICV3_G0) { + fiqlevel = 1; + } else { + irqlevel = 1; + } + } + } else if (idx >= 0) { + uint64_t lr = cs->ich_lr_el2[idx]; + + if (icv_hppi_can_preempt(cs, lr)) { + /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */ + if (lr & ICH_LR_EL2_GROUP) { + irqlevel = 1; + } else { + fiqlevel = 1; + } + } + } + + trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel); + qemu_set_irq(cs->parent_vfiq, fiqlevel); + qemu_set_irq(cs->parent_virq, irqlevel); +} + static void gicv3_cpuif_virt_update(GICv3CPUState *cs) { - /* Tell the CPU about any pending virtual interrupts or + /* + * Tell the CPU about any pending virtual interrupts or * maintenance interrupts, following a change to the state * of the CPU interface relevant to virtual interrupts. * @@ -389,37 +507,17 @@ static void gicv3_cpuif_virt_update(GICv3CPUState *cs) * naturally as a result of there being no architectural * linkage between the physical and virtual GIC logic. */ - int idx; - int irqlevel = 0; - int fiqlevel = 0; - int maintlevel = 0; ARMCPU *cpu = ARM_CPU(cs->cpu); + int maintlevel = 0; - idx = hppvi_index(cs); - trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx); - if (idx >= 0) { - uint64_t lr = cs->ich_lr_el2[idx]; - - if (icv_hppi_can_preempt(cs, lr)) { - /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */ - if (lr & ICH_LR_EL2_GROUP) { - irqlevel = 1; - } else { - fiqlevel = 1; - } - } - } + gicv3_cpuif_virt_irq_fiq_update(cs); if ((cs->ich_hcr_el2 & ICH_HCR_EL2_EN) && maintenance_interrupt_state(cs) != 0) { maintlevel = 1; } - trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, - irqlevel, maintlevel); - - qemu_set_irq(cs->parent_vfiq, fiqlevel); - qemu_set_irq(cs->parent_virq, irqlevel); + trace_gicv3_cpuif_virt_set_maint_irq(gicv3_redist_affid(cs), maintlevel); qemu_set_irq(cpu->gicv3_maintenance_interrupt, maintlevel); } @@ -445,7 +543,7 @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU; - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); return; } @@ -490,7 +588,7 @@ static void icv_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri, write_vbpr(cs, grp, value); - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); } static uint64_t icv_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -517,7 +615,7 @@ static void icv_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri, cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT, ICH_VMCR_EL2_VPMR_LENGTH, value); - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); } static uint64_t icv_igrpen_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -584,7 +682,7 @@ static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, cs->ich_vmcr_el2 = deposit64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VEOIM_SHIFT, 1, value & ICC_CTLR_EL1_EOIMODE ? 
1 : 0); - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); } static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -603,7 +701,11 @@ static uint64_t icv_hppir_read(CPUARMState *env, const ARMCPRegInfo *ri) int idx = hppvi_index(cs); uint64_t value = INTID_SPURIOUS; - if (idx >= 0) { + if (idx == HPPVI_INDEX_VLPI) { + if (cs->hppvlpi.grp == grp) { + value = cs->hppvlpi.irq; + } + } else if (idx >= 0) { uint64_t lr = cs->ich_lr_el2[idx]; int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; @@ -634,6 +736,18 @@ static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp) cs->ich_apr[grp][regno] |= (1 << regbit); } +static void icv_activate_vlpi(GICv3CPUState *cs) +{ + uint32_t mask = icv_gprio_mask(cs, cs->hppvlpi.grp); + int prio = cs->hppvlpi.prio & mask; + int aprbit = prio >> (8 - cs->vprebits); + int regno = aprbit / 32; + int regbit = aprbit % 32; + + cs->ich_apr[cs->hppvlpi.grp][regno] |= (1 << regbit); + gicv3_redist_vlpi_pending(cs, cs->hppvlpi.irq, 0); +} + static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri) { GICv3CPUState *cs = icc_cs_from_env(env); @@ -641,7 +755,12 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri) int idx = hppvi_index(cs); uint64_t intid = INTID_SPURIOUS; - if (idx >= 0) { + if (idx == HPPVI_INDEX_VLPI) { + if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) { + intid = cs->hppvlpi.irq; + icv_activate_vlpi(cs); + } + } else if (idx >= 0) { uint64_t lr = cs->ich_lr_el2[idx]; int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0; @@ -2333,7 +2452,7 @@ static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri, trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value); cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU; - gicv3_cpuif_virt_update(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); } static uint64_t ich_hcr_read(CPUARMState *env, const ARMCPRegInfo *ri) @@ -2459,11 +2578,15 @@ static uint64_t ich_vtr_read(CPUARMState *env, const ARMCPRegInfo *ri) uint64_t value; value = ((cs->num_list_regs - 1) << ICH_VTR_EL2_LISTREGS_SHIFT) - | ICH_VTR_EL2_TDS | ICH_VTR_EL2_NV4 | ICH_VTR_EL2_A3V + | ICH_VTR_EL2_TDS | ICH_VTR_EL2_A3V | (1 << ICH_VTR_EL2_IDBITS_SHIFT) | ((cs->vprebits - 1) << ICH_VTR_EL2_PREBITS_SHIFT) | ((cs->vpribits - 1) << ICH_VTR_EL2_PRIBITS_SHIFT); + if (cs->gic->revision < 4) { + value |= ICH_VTR_EL2_NV4; + } + trace_gicv3_ich_vtr_read(gicv3_redist_affid(cs), value); return value; } @@ -2616,6 +2739,12 @@ static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque) GICv3CPUState *cs = opaque; gicv3_cpuif_update(cs); + /* + * Because vLPIs are only pending in NonSecure state, + * an EL change can change the VIRQ/VFIQ status (but + * cannot affect the maintenance interrupt state) + */ + gicv3_cpuif_virt_irq_fiq_update(cs); } void gicv3_init_cpuif(GICv3State *s) diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c index 28d913b211..b9ed955e36 100644 --- a/hw/intc/arm_gicv3_dist.c +++ b/hw/intc/arm_gicv3_dist.c @@ -383,7 +383,7 @@ static bool gicd_readl(GICv3State *s, hwaddr offset, * No1N == 1 (1-of-N SPI interrupts not supported) * A3V == 1 (non-zero values of Affinity level 3 supported) * IDbits == 0xf (we support 16-bit interrupt identifiers) - * DVIS == 0 (Direct virtual LPI injection not supported) + * DVIS == 1 (Direct virtual LPI injection supported) if GICv4 * LPIS == 1 (LPIs are supported if affinity routing is enabled) * num_LPIs == 0b00000 (bits [15:11],Number of LPIs as indicated * by 
GICD_TYPER.IDbits) @@ -399,8 +399,9 @@ static bool gicd_readl(GICv3State *s, hwaddr offset, * so we only need to check the DS bit. */ bool sec_extn = !(s->gicd_ctlr & GICD_CTLR_DS); + bool dvis = s->revision >= 4; - *data = (1 << 25) | (1 << 24) | (sec_extn << 10) | + *data = (1 << 25) | (1 << 24) | (dvis << 18) | (sec_extn << 10) | (s->lpi_enable << GICD_TYPER_LPIS_SHIFT) | (0xf << 19) | itlinesnumber; return true; @@ -557,7 +558,7 @@ static bool gicd_readl(GICv3State *s, hwaddr offset, } case GICD_IDREGS ... GICD_IDREGS + 0x2f: /* ID registers */ - *data = gicv3_idreg(offset - GICD_IDREGS); + *data = gicv3_idreg(s, offset - GICD_IDREGS, GICV3_PIDR0_DIST); return true; case GICD_SGIR: /* WO registers, return unknown value */ diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c index 8746673213..2ff21ed6bb 100644 --- a/hw/intc/arm_gicv3_its.c +++ b/hw/intc/arm_gicv3_its.c @@ -61,6 +61,12 @@ typedef struct ITEntry { uint32_t vpeid; } ITEntry; +typedef struct VTEntry { + bool valid; + unsigned vptsize; + uint32_t rdbase; + uint64_t vptaddr; +} VTEntry; /* * The ITS spec permits a range of CONSTRAINED UNPREDICTABLE options @@ -72,13 +78,33 @@ typedef struct ITEntry { * and continue processing. * The process_* functions which handle individual ITS commands all * return an ItsCmdResult which tells process_cmdq() whether it should - * stall or keep going. + * stall, keep going because of an error, or keep going because the + * command was a success. */ typedef enum ItsCmdResult { CMD_STALL = 0, CMD_CONTINUE = 1, + CMD_CONTINUE_OK = 2, } ItsCmdResult; +/* True if the ITS supports the GICv4 virtual LPI feature */ +static bool its_feature_virtual(GICv3ITSState *s) +{ + return s->typer & R_GITS_TYPER_VIRTUAL_MASK; +} + +static inline bool intid_in_lpi_range(uint32_t id) +{ + return id >= GICV3_LPI_INTID_START && + id < (1 << (GICD_TYPER_IDBITS + 1)); +} + +static inline bool valid_doorbell(uint32_t id) +{ + /* Doorbell fields may be an LPI, or 1023 to mean "no doorbell" */ + return id == INTID_SPURIOUS || intid_in_lpi_range(id); +} + static uint64_t baser_base_addr(uint64_t value, uint32_t page_sz) { uint64_t result = 0; @@ -289,97 +315,247 @@ out: } /* - * This function handles the processing of following commands based on - * the ItsCmdType parameter passed:- - * 1. triggering of lpi interrupt translation via ITS INT command - * 2. triggering of lpi interrupt translation via gits_translater register - * 3. handling of ITS CLEAR command - * 4. handling of ITS DISCARD command + * Read the vPE Table entry at index @vpeid. On success (including + * successfully determining that there is no valid entry for this index), + * we return MEMTX_OK and populate the VTEntry struct accordingly. + * If there is an error reading memory then we return the error code. */ -static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid, - uint32_t eventid, ItsCmdType cmd) +static MemTxResult get_vte(GICv3ITSState *s, uint32_t vpeid, VTEntry *vte) +{ + MemTxResult res = MEMTX_OK; + AddressSpace *as = &s->gicv3->dma_as; + uint64_t entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res); + uint64_t vteval; + + if (entry_addr == -1) { + /* No L2 table entry, i.e. 
no valid VTE, or a memory error */ + vte->valid = false; + goto out; + } + vteval = address_space_ldq_le(as, entry_addr, MEMTXATTRS_UNSPECIFIED, &res); + if (res != MEMTX_OK) { + goto out; + } + vte->valid = FIELD_EX64(vteval, VTE, VALID); + vte->vptsize = FIELD_EX64(vteval, VTE, VPTSIZE); + vte->vptaddr = FIELD_EX64(vteval, VTE, VPTADDR); + vte->rdbase = FIELD_EX64(vteval, VTE, RDBASE); +out: + if (res != MEMTX_OK) { + trace_gicv3_its_vte_read_fault(vpeid); + } else { + trace_gicv3_its_vte_read(vpeid, vte->valid, vte->vptsize, + vte->vptaddr, vte->rdbase); + } + return res; +} + +/* + * Given a (DeviceID, EventID), look up the corresponding ITE, including + * checking for the various invalid-value cases. If we find a valid ITE, + * fill in @ite and @dte and return CMD_CONTINUE_OK. Otherwise return + * CMD_STALL or CMD_CONTINUE as appropriate (and the contents of @ite + * should not be relied on). + * + * The string @who is purely for the LOG_GUEST_ERROR messages, + * and should indicate the name of the calling function or similar. + */ +static ItsCmdResult lookup_ite(GICv3ITSState *s, const char *who, + uint32_t devid, uint32_t eventid, ITEntry *ite, + DTEntry *dte) { uint64_t num_eventids; - DTEntry dte; - CTEntry cte; - ITEntry ite; if (devid >= s->dt.num_entries) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid command attributes: devid %d>=%d", - __func__, devid, s->dt.num_entries); + who, devid, s->dt.num_entries); return CMD_CONTINUE; } - if (get_dte(s, devid, &dte) != MEMTX_OK) { + if (get_dte(s, devid, dte) != MEMTX_OK) { return CMD_STALL; } - if (!dte.valid) { + if (!dte->valid) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid command attributes: " - "invalid dte for %d\n", __func__, devid); + "invalid dte for %d\n", who, devid); return CMD_CONTINUE; } - num_eventids = 1ULL << (dte.size + 1); + num_eventids = 1ULL << (dte->size + 1); if (eventid >= num_eventids) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid command attributes: eventid %d >= %" - PRId64 "\n", - __func__, eventid, num_eventids); + PRId64 "\n", who, eventid, num_eventids); return CMD_CONTINUE; } - if (get_ite(s, eventid, &dte, &ite) != MEMTX_OK) { + if (get_ite(s, eventid, dte, ite) != MEMTX_OK) { return CMD_STALL; } - if (!ite.valid || ite.inttype != ITE_INTTYPE_PHYSICAL) { + if (!ite->valid) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes: invalid ITE\n", - __func__); + "%s: invalid command attributes: invalid ITE\n", who); return CMD_CONTINUE; } - if (ite.icid >= s->ct.num_entries) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid ICID 0x%x in ITE (table corrupted?)\n", - __func__, ite.icid); + return CMD_CONTINUE_OK; +} + +/* + * Given an ICID, look up the corresponding CTE, including checking for various + * invalid-value cases. If we find a valid CTE, fill in @cte and return + * CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE (and the + * contents of @cte should not be relied on). + * + * The string @who is purely for the LOG_GUEST_ERROR messages, + * and should indicate the name of the calling function or similar. 
+ */ +static ItsCmdResult lookup_cte(GICv3ITSState *s, const char *who, + uint32_t icid, CTEntry *cte) +{ + if (icid >= s->ct.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid ICID 0x%x\n", who, icid); + return CMD_CONTINUE; + } + if (get_cte(s, icid, cte) != MEMTX_OK) { + return CMD_STALL; + } + if (!cte->valid) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid CTE\n", who); + return CMD_CONTINUE; + } + if (cte->rdbase >= s->gicv3->num_cpu) { return CMD_CONTINUE; } + return CMD_CONTINUE_OK; +} - if (get_cte(s, ite.icid, &cte) != MEMTX_OK) { +/* + * Given a VPEID, look up the corresponding VTE, including checking + * for various invalid-value cases. if we find a valid VTE, fill in @vte + * and return CMD_CONTINUE_OK; otherwise return CMD_STALL or CMD_CONTINUE + * (and the contents of @vte should not be relied on). + * + * The string @who is purely for the LOG_GUEST_ERROR messages, + * and should indicate the name of the calling function or similar. + */ +static ItsCmdResult lookup_vte(GICv3ITSState *s, const char *who, + uint32_t vpeid, VTEntry *vte) +{ + if (vpeid >= s->vpet.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid VPEID 0x%x\n", who, vpeid); + return CMD_CONTINUE; + } + + if (get_vte(s, vpeid, vte) != MEMTX_OK) { return CMD_STALL; } - if (!cte.valid) { + if (!vte->valid) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes: invalid CTE\n", - __func__); + "%s: invalid VTE for VPEID 0x%x\n", who, vpeid); + return CMD_CONTINUE; + } + + if (vte->rdbase >= s->gicv3->num_cpu) { + return CMD_CONTINUE; + } + return CMD_CONTINUE_OK; +} + +static ItsCmdResult process_its_cmd_phys(GICv3ITSState *s, const ITEntry *ite, + int irqlevel) +{ + CTEntry cte; + ItsCmdResult cmdres; + + cmdres = lookup_cte(s, __func__, ite->icid, &cte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite->intid, irqlevel); + return CMD_CONTINUE_OK; +} + +static ItsCmdResult process_its_cmd_virt(GICv3ITSState *s, const ITEntry *ite, + int irqlevel) +{ + VTEntry vte; + ItsCmdResult cmdres; + + cmdres = lookup_vte(s, __func__, ite->vpeid, &vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + if (!intid_in_lpi_range(ite->intid) || + ite->intid >= (1ULL << (vte.vptsize + 1))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n", + __func__, ite->intid); return CMD_CONTINUE; } /* - * Current implementation only supports rdbase == procnum - * Hence rdbase physical address is ignored + * For QEMU the actual pending of the vLPI is handled in the + * redistributor code */ - if (cte.rdbase >= s->gicv3->num_cpu) { - return CMD_CONTINUE; + gicv3_redist_process_vlpi(&s->gicv3->cpu[vte.rdbase], ite->intid, + vte.vptaddr << 16, ite->doorbell, irqlevel); + return CMD_CONTINUE_OK; +} + +/* + * This function handles the processing of following commands based on + * the ItsCmdType parameter passed:- + * 1. triggering of lpi interrupt translation via ITS INT command + * 2. triggering of lpi interrupt translation via gits_translater register + * 3. handling of ITS CLEAR command + * 4. 
handling of ITS DISCARD command + */ +static ItsCmdResult do_process_its_cmd(GICv3ITSState *s, uint32_t devid, + uint32_t eventid, ItsCmdType cmd) +{ + DTEntry dte; + ITEntry ite; + ItsCmdResult cmdres; + int irqlevel; + + cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; } - if ((cmd == CLEAR) || (cmd == DISCARD)) { - gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 0); - } else { - gicv3_redist_process_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid, 1); + irqlevel = (cmd == CLEAR || cmd == DISCARD) ? 0 : 1; + + switch (ite.inttype) { + case ITE_INTTYPE_PHYSICAL: + cmdres = process_its_cmd_phys(s, &ite, irqlevel); + break; + case ITE_INTTYPE_VIRTUAL: + if (!its_feature_virtual(s)) { + /* Can't happen unless guest is illegally writing to table memory */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid type %d in ITE (table corrupted?)\n", + __func__, ite.inttype); + return CMD_CONTINUE; + } + cmdres = process_its_cmd_virt(s, &ite, irqlevel); + break; + default: + g_assert_not_reached(); } - if (cmd == DISCARD) { + if (cmdres == CMD_CONTINUE_OK && cmd == DISCARD) { ITEntry ite = {}; /* remove mapping from interrupt translation table */ ite.valid = false; - return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL; + return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL; } - return CMD_CONTINUE; + return CMD_CONTINUE_OK; } + static ItsCmdResult process_its_cmd(GICv3ITSState *s, const uint64_t *cmdpkt, ItsCmdType cmd) { @@ -409,7 +585,6 @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt, uint32_t devid, eventid; uint32_t pIntid = 0; uint64_t num_eventids; - uint32_t num_intids; uint16_t icid = 0; DTEntry dte; ITEntry ite; @@ -437,7 +612,6 @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt, return CMD_STALL; } num_eventids = 1ULL << (dte.size + 1); - num_intids = 1ULL << (GICD_TYPER_IDBITS + 1); if (icid >= s->ct.num_entries) { qemu_log_mask(LOG_GUEST_ERROR, @@ -459,7 +633,7 @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt, return CMD_CONTINUE; } - if (pIntid < GICV3_LPI_INTID_START || pIntid >= num_intids) { + if (!intid_in_lpi_range(pIntid)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid interrupt ID 0x%x\n", __func__, pIntid); return CMD_CONTINUE; @@ -472,7 +646,86 @@ static ItsCmdResult process_mapti(GICv3ITSState *s, const uint64_t *cmdpkt, ite.icid = icid; ite.doorbell = INTID_SPURIOUS; ite.vpeid = 0; - return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE : CMD_STALL; + return update_ite(s, eventid, &dte, &ite) ? 
CMD_CONTINUE_OK : CMD_STALL; +} + +static ItsCmdResult process_vmapti(GICv3ITSState *s, const uint64_t *cmdpkt, + bool ignore_vintid) +{ + uint32_t devid, eventid, vintid, doorbell, vpeid; + uint32_t num_eventids; + DTEntry dte; + ITEntry ite; + + if (!its_feature_virtual(s)) { + return CMD_CONTINUE; + } + + devid = FIELD_EX64(cmdpkt[0], VMAPTI_0, DEVICEID); + eventid = FIELD_EX64(cmdpkt[1], VMAPTI_1, EVENTID); + vpeid = FIELD_EX64(cmdpkt[1], VMAPTI_1, VPEID); + doorbell = FIELD_EX64(cmdpkt[2], VMAPTI_2, DOORBELL); + if (ignore_vintid) { + vintid = eventid; + trace_gicv3_its_cmd_vmapi(devid, eventid, vpeid, doorbell); + } else { + vintid = FIELD_EX64(cmdpkt[2], VMAPTI_2, VINTID); + trace_gicv3_its_cmd_vmapti(devid, eventid, vpeid, vintid, doorbell); + } + + if (devid >= s->dt.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid DeviceID 0x%x (must be less than 0x%x)\n", + __func__, devid, s->dt.num_entries); + return CMD_CONTINUE; + } + + if (get_dte(s, devid, &dte) != MEMTX_OK) { + return CMD_STALL; + } + + if (!dte.valid) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: no entry in device table for DeviceID 0x%x\n", + __func__, devid); + return CMD_CONTINUE; + } + + num_eventids = 1ULL << (dte.size + 1); + + if (eventid >= num_eventids) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: EventID 0x%x too large for DeviceID 0x%x " + "(must be less than 0x%x)\n", + __func__, eventid, devid, num_eventids); + return CMD_CONTINUE; + } + if (!intid_in_lpi_range(vintid)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: VIntID 0x%x not a valid LPI\n", + __func__, vintid); + return CMD_CONTINUE; + } + if (!valid_doorbell(doorbell)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Doorbell %d not 1023 and not a valid LPI\n", + __func__, doorbell); + return CMD_CONTINUE; + } + if (vpeid >= s->vpet.num_entries) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: VPEID 0x%x out of range (must be less than 0x%x)\n", + __func__, vpeid, s->vpet.num_entries); + return CMD_CONTINUE; + } + /* add ite entry to interrupt translation table */ + ite.valid = true; + ite.inttype = ITE_INTTYPE_VIRTUAL; + ite.intid = vintid; + ite.icid = 0; + ite.doorbell = doorbell; + ite.vpeid = vpeid; + return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL; } /* @@ -533,7 +786,7 @@ static ItsCmdResult process_mapc(GICv3ITSState *s, const uint64_t *cmdpkt) return CMD_CONTINUE; } - return update_cte(s, icid, &cte) ? CMD_CONTINUE : CMD_STALL; + return update_cte(s, icid, &cte) ? CMD_CONTINUE_OK : CMD_STALL; } /* @@ -594,7 +847,7 @@ static ItsCmdResult process_mapd(GICv3ITSState *s, const uint64_t *cmdpkt) return CMD_CONTINUE; } - return update_dte(s, devid, &dte) ? CMD_CONTINUE : CMD_STALL; + return update_dte(s, devid, &dte) ? 
CMD_CONTINUE_OK : CMD_STALL; } static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt) @@ -623,23 +876,23 @@ static ItsCmdResult process_movall(GICv3ITSState *s, const uint64_t *cmdpkt) if (rd1 == rd2) { /* Move to same target must succeed as a no-op */ - return CMD_CONTINUE; + return CMD_CONTINUE_OK; } /* Move all pending LPIs from redistributor 1 to redistributor 2 */ gicv3_redist_movall_lpis(&s->gicv3->cpu[rd1], &s->gicv3->cpu[rd2]); - return CMD_CONTINUE; + return CMD_CONTINUE_OK; } static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt) { uint32_t devid, eventid; uint16_t new_icid; - uint64_t num_eventids; DTEntry dte; CTEntry old_cte, new_cte; ITEntry old_ite; + ItsCmdResult cmdres; devid = FIELD_EX64(cmdpkt[0], MOVI_0, DEVICEID); eventid = FIELD_EX64(cmdpkt[1], MOVI_1, EVENTID); @@ -647,103 +900,346 @@ static ItsCmdResult process_movi(GICv3ITSState *s, const uint64_t *cmdpkt) trace_gicv3_its_cmd_movi(devid, eventid, new_icid); - if (devid >= s->dt.num_entries) { + cmdres = lookup_ite(s, __func__, devid, eventid, &old_ite, &dte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + if (old_ite.inttype != ITE_INTTYPE_PHYSICAL) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes: devid %d>=%d", - __func__, devid, s->dt.num_entries); + "%s: invalid command attributes: invalid ITE\n", + __func__); return CMD_CONTINUE; } - if (get_dte(s, devid, &dte) != MEMTX_OK) { - return CMD_STALL; + + cmdres = lookup_cte(s, __func__, old_ite.icid, &old_cte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + cmdres = lookup_cte(s, __func__, new_icid, &new_cte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; } - if (!dte.valid) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes: " - "invalid dte for %d\n", __func__, devid); + if (old_cte.rdbase != new_cte.rdbase) { + /* Move the LPI from the old redistributor to the new one */ + gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase], + &s->gicv3->cpu[new_cte.rdbase], + old_ite.intid); + } + + /* Update the ICID field in the interrupt translation table entry */ + old_ite.icid = new_icid; + return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE_OK : CMD_STALL; +} + +/* + * Update the vPE Table entry at index @vpeid with the entry @vte. + * Returns true on success, false if there was a memory access error. 
+ */ +static bool update_vte(GICv3ITSState *s, uint32_t vpeid, const VTEntry *vte) +{ + AddressSpace *as = &s->gicv3->dma_as; + uint64_t entry_addr; + uint64_t vteval = 0; + MemTxResult res = MEMTX_OK; + + trace_gicv3_its_vte_write(vpeid, vte->valid, vte->vptsize, vte->vptaddr, + vte->rdbase); + + if (vte->valid) { + vteval = FIELD_DP64(vteval, VTE, VALID, 1); + vteval = FIELD_DP64(vteval, VTE, VPTSIZE, vte->vptsize); + vteval = FIELD_DP64(vteval, VTE, VPTADDR, vte->vptaddr); + vteval = FIELD_DP64(vteval, VTE, RDBASE, vte->rdbase); + } + + entry_addr = table_entry_addr(s, &s->vpet, vpeid, &res); + if (res != MEMTX_OK) { + return false; + } + if (entry_addr == -1) { + /* No L2 table for this index: discard write and continue */ + return true; + } + address_space_stq_le(as, entry_addr, vteval, MEMTXATTRS_UNSPECIFIED, &res); + return res == MEMTX_OK; +} + +static ItsCmdResult process_vmapp(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + VTEntry vte; + uint32_t vpeid; + + if (!its_feature_virtual(s)) { return CMD_CONTINUE; } - num_eventids = 1ULL << (dte.size + 1); - if (eventid >= num_eventids) { + vpeid = FIELD_EX64(cmdpkt[1], VMAPP_1, VPEID); + vte.rdbase = FIELD_EX64(cmdpkt[2], VMAPP_2, RDBASE); + vte.valid = FIELD_EX64(cmdpkt[2], VMAPP_2, V); + vte.vptsize = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTSIZE); + vte.vptaddr = FIELD_EX64(cmdpkt[3], VMAPP_3, VPTADDR); + + trace_gicv3_its_cmd_vmapp(vpeid, vte.rdbase, vte.valid, + vte.vptaddr, vte.vptsize); + + /* + * For GICv4.0 the VPT_size field is only 5 bits, whereas we + * define our field macros to include the full GICv4.1 8 bits. + * The range check on VPT_size will catch the cases where + * the guest set the RES0-in-GICv4.0 bits [7:6]. + */ + if (vte.vptsize > FIELD_EX64(s->typer, GITS_TYPER, IDBITS)) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes: eventid %d >= %" - PRId64 "\n", - __func__, eventid, num_eventids); + "%s: invalid VPT_size 0x%x\n", __func__, vte.vptsize); return CMD_CONTINUE; } - if (get_ite(s, eventid, &dte, &old_ite) != MEMTX_OK) { - return CMD_STALL; + if (vte.valid && vte.rdbase >= s->gicv3->num_cpu) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid rdbase 0x%x\n", __func__, vte.rdbase); + return CMD_CONTINUE; } - if (!old_ite.valid || old_ite.inttype != ITE_INTTYPE_PHYSICAL) { + if (vpeid >= s->vpet.num_entries) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes: invalid ITE\n", - __func__); + "%s: VPEID 0x%x out of range (must be less than 0x%x)\n", + __func__, vpeid, s->vpet.num_entries); return CMD_CONTINUE; } - if (old_ite.icid >= s->ct.num_entries) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid ICID 0x%x in ITE (table corrupted?)\n", - __func__, old_ite.icid); + return update_vte(s, vpeid, &vte) ? CMD_CONTINUE_OK : CMD_STALL; +} + +typedef struct VmovpCallbackData { + uint64_t rdbase; + uint32_t vpeid; + /* + * Overall command result. If more than one callback finds an + * error, STALL beats CONTINUE. + */ + ItsCmdResult result; +} VmovpCallbackData; + +static void vmovp_callback(gpointer data, gpointer opaque) +{ + /* + * This function is called to update the VPEID field in a VPE + * table entry for this ITS. This might be because of a VMOVP + * command executed on any ITS that is connected to the same GIC + * as this ITS. We need to read the VPE table entry for the VPEID + * and update its RDBASE field. 
+ */ + GICv3ITSState *s = data; + VmovpCallbackData *cbdata = opaque; + VTEntry vte; + ItsCmdResult cmdres; + + cmdres = lookup_vte(s, __func__, cbdata->vpeid, &vte); + switch (cmdres) { + case CMD_STALL: + cbdata->result = CMD_STALL; + return; + case CMD_CONTINUE: + if (cbdata->result != CMD_STALL) { + cbdata->result = CMD_CONTINUE; + } + return; + case CMD_CONTINUE_OK: + break; + } + + vte.rdbase = cbdata->rdbase; + if (!update_vte(s, cbdata->vpeid, &vte)) { + cbdata->result = CMD_STALL; + } +} + +static ItsCmdResult process_vmovp(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + VmovpCallbackData cbdata; + + if (!its_feature_virtual(s)) { return CMD_CONTINUE; } - if (new_icid >= s->ct.num_entries) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes: ICID 0x%x\n", - __func__, new_icid); + cbdata.vpeid = FIELD_EX64(cmdpkt[1], VMOVP_1, VPEID); + cbdata.rdbase = FIELD_EX64(cmdpkt[2], VMOVP_2, RDBASE); + + trace_gicv3_its_cmd_vmovp(cbdata.vpeid, cbdata.rdbase); + + if (cbdata.rdbase >= s->gicv3->num_cpu) { return CMD_CONTINUE; } - if (get_cte(s, old_ite.icid, &old_cte) != MEMTX_OK) { - return CMD_STALL; + /* + * Our ITS implementation reports GITS_TYPER.VMOVP == 1, which means + * that when the VMOVP command is executed on an ITS to change the + * VPEID field in a VPE table entry the change must be propagated + * to all the ITSes connected to the same GIC. + */ + cbdata.result = CMD_CONTINUE_OK; + gicv3_foreach_its(s->gicv3, vmovp_callback, &cbdata); + return cbdata.result; +} + +static ItsCmdResult process_vmovi(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + uint32_t devid, eventid, vpeid, doorbell; + bool doorbell_valid; + DTEntry dte; + ITEntry ite; + VTEntry old_vte, new_vte; + ItsCmdResult cmdres; + + if (!its_feature_virtual(s)) { + return CMD_CONTINUE; } - if (!old_cte.valid) { + + devid = FIELD_EX64(cmdpkt[0], VMOVI_0, DEVICEID); + eventid = FIELD_EX64(cmdpkt[1], VMOVI_1, EVENTID); + vpeid = FIELD_EX64(cmdpkt[1], VMOVI_1, VPEID); + doorbell_valid = FIELD_EX64(cmdpkt[2], VMOVI_2, D); + doorbell = FIELD_EX64(cmdpkt[2], VMOVI_2, DOORBELL); + + trace_gicv3_its_cmd_vmovi(devid, eventid, vpeid, doorbell_valid, doorbell); + + if (doorbell_valid && !valid_doorbell(doorbell)) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes: " - "invalid CTE for old ICID 0x%x\n", - __func__, old_ite.icid); + "%s: invalid doorbell 0x%x\n", __func__, doorbell); return CMD_CONTINUE; } - if (get_cte(s, new_icid, &new_cte) != MEMTX_OK) { - return CMD_STALL; + cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; } - if (!new_cte.valid) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes: " - "invalid CTE for new ICID 0x%x\n", - __func__, new_icid); + + if (ite.inttype != ITE_INTTYPE_VIRTUAL) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: ITE is not for virtual interrupt\n", + __func__); return CMD_CONTINUE; } - if (old_cte.rdbase >= s->gicv3->num_cpu) { + cmdres = lookup_vte(s, __func__, ite.vpeid, &old_vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + cmdres = lookup_vte(s, __func__, vpeid, &new_vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + if (!intid_in_lpi_range(ite.intid) || + ite.intid >= (1ULL << (old_vte.vptsize + 1)) || + ite.intid >= (1ULL << (new_vte.vptsize + 1))) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: CTE has invalid rdbase 0x%x\n", - __func__, old_cte.rdbase); + "%s: ITE intid 0x%x out of range\n", + __func__, ite.intid); return CMD_CONTINUE; } - if (new_cte.rdbase 
>= s->gicv3->num_cpu) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: CTE has invalid rdbase 0x%x\n", - __func__, new_cte.rdbase); + ite.vpeid = vpeid; + if (doorbell_valid) { + ite.doorbell = doorbell; + } + + /* + * Move the LPI from the old redistributor to the new one. We don't + * need to do anything if the guest somehow specified the + * same pending table for source and destination. + */ + if (old_vte.vptaddr != new_vte.vptaddr) { + gicv3_redist_mov_vlpi(&s->gicv3->cpu[old_vte.rdbase], + old_vte.vptaddr << 16, + &s->gicv3->cpu[new_vte.rdbase], + new_vte.vptaddr << 16, + ite.intid, + ite.doorbell); + } + + /* Update the ITE to the new VPEID and possibly doorbell values */ + return update_ite(s, eventid, &dte, &ite) ? CMD_CONTINUE_OK : CMD_STALL; +} + +static ItsCmdResult process_vinvall(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + VTEntry vte; + uint32_t vpeid; + ItsCmdResult cmdres; + + if (!its_feature_virtual(s)) { return CMD_CONTINUE; } - if (old_cte.rdbase != new_cte.rdbase) { - /* Move the LPI from the old redistributor to the new one */ - gicv3_redist_mov_lpi(&s->gicv3->cpu[old_cte.rdbase], - &s->gicv3->cpu[new_cte.rdbase], - old_ite.intid); + vpeid = FIELD_EX64(cmdpkt[1], VINVALL_1, VPEID); + + trace_gicv3_its_cmd_vinvall(vpeid); + + cmdres = lookup_vte(s, __func__, vpeid, &vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; } - /* Update the ICID field in the interrupt translation table entry */ - old_ite.icid = new_icid; - return update_ite(s, eventid, &dte, &old_ite) ? CMD_CONTINUE : CMD_STALL; + gicv3_redist_vinvall(&s->gicv3->cpu[vte.rdbase], vte.vptaddr << 16); + return CMD_CONTINUE_OK; +} + +static ItsCmdResult process_inv(GICv3ITSState *s, const uint64_t *cmdpkt) +{ + uint32_t devid, eventid; + ITEntry ite; + DTEntry dte; + CTEntry cte; + VTEntry vte; + ItsCmdResult cmdres; + + devid = FIELD_EX64(cmdpkt[0], INV_0, DEVICEID); + eventid = FIELD_EX64(cmdpkt[1], INV_1, EVENTID); + + trace_gicv3_its_cmd_inv(devid, eventid); + + cmdres = lookup_ite(s, __func__, devid, eventid, &ite, &dte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + + switch (ite.inttype) { + case ITE_INTTYPE_PHYSICAL: + cmdres = lookup_cte(s, __func__, ite.icid, &cte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + gicv3_redist_inv_lpi(&s->gicv3->cpu[cte.rdbase], ite.intid); + break; + case ITE_INTTYPE_VIRTUAL: + if (!its_feature_virtual(s)) { + /* Can't happen unless guest is illegally writing to table memory */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid type %d in ITE (table corrupted?)\n", + __func__, ite.inttype); + return CMD_CONTINUE; + } + + cmdres = lookup_vte(s, __func__, ite.vpeid, &vte); + if (cmdres != CMD_CONTINUE_OK) { + return cmdres; + } + if (!intid_in_lpi_range(ite.intid) || + ite.intid >= (1ULL << (vte.vptsize + 1))) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: intid 0x%x out of range\n", + __func__, ite.intid); + return CMD_CONTINUE; + } + gicv3_redist_inv_vlpi(&s->gicv3->cpu[vte.rdbase], ite.intid, + vte.vptaddr << 16); + break; + default: + g_assert_not_reached(); + } + + return CMD_CONTINUE_OK; } /* @@ -782,7 +1278,7 @@ static void process_cmdq(GICv3ITSState *s) } while (wr_offset != rd_offset) { - ItsCmdResult result = CMD_CONTINUE; + ItsCmdResult result = CMD_CONTINUE_OK; void *hostmem; hwaddr buflen; uint64_t cmdpkt[GITS_CMDQ_ENTRY_WORDS]; @@ -827,6 +1323,17 @@ static void process_cmdq(GICv3ITSState *s) */ trace_gicv3_its_cmd_sync(); break; + case GITS_CMD_VSYNC: + /* + * VSYNC also is a nop, because our implementation is always + * in sync. 
+ */ + if (!its_feature_virtual(s)) { + result = CMD_CONTINUE; + break; + } + trace_gicv3_its_cmd_vsync(); + break; case GITS_CMD_MAPD: result = process_mapd(s, cmdpkt); break; @@ -843,14 +1350,18 @@ static void process_cmdq(GICv3ITSState *s) result = process_its_cmd(s, cmdpkt, DISCARD); break; case GITS_CMD_INV: + result = process_inv(s, cmdpkt); + break; case GITS_CMD_INVALL: /* * Current implementation doesn't cache any ITS tables, * but the calculated lpi priority information. We only * need to trigger lpi priority re-calculation to be in * sync with LPI config table or pending table changes. + * INVALL operates on a collection specified by ICID so + * it only affects physical LPIs. */ - trace_gicv3_its_cmd_inv(); + trace_gicv3_its_cmd_invall(); for (i = 0; i < s->gicv3->num_cpu; i++) { gicv3_redist_update_lpi(&s->gicv3->cpu[i]); } @@ -861,11 +1372,30 @@ static void process_cmdq(GICv3ITSState *s) case GITS_CMD_MOVALL: result = process_movall(s, cmdpkt); break; + case GITS_CMD_VMAPTI: + result = process_vmapti(s, cmdpkt, false); + break; + case GITS_CMD_VMAPI: + result = process_vmapti(s, cmdpkt, true); + break; + case GITS_CMD_VMAPP: + result = process_vmapp(s, cmdpkt); + break; + case GITS_CMD_VMOVP: + result = process_vmovp(s, cmdpkt); + break; + case GITS_CMD_VMOVI: + result = process_vmovi(s, cmdpkt); + break; + case GITS_CMD_VINVALL: + result = process_vinvall(s, cmdpkt); + break; default: trace_gicv3_its_cmd_unknown(cmd); break; } - if (result == CMD_CONTINUE) { + if (result != CMD_STALL) { + /* CMD_CONTINUE or CMD_CONTINUE_OK */ rd_offset++; rd_offset %= s->cq.num_entries; s->creadr = FIELD_DP64(s->creadr, GITS_CREADR, OFFSET, rd_offset); @@ -941,6 +1471,15 @@ static void extract_table_params(GICv3ITSState *s) idbits = 16; } break; + case GITS_BASER_TYPE_VPE: + td = &s->vpet; + /* + * For QEMU vPEIDs are always 16 bits. (GICv4.1 allows an + * implementation to implement fewer bits and report this + * via GICD_TYPER2.) + */ + idbits = 16; + break; default: /* * GITS_BASER<n>.TYPE is read-only, so GITS_BASER_RO_MASK @@ -1160,7 +1699,7 @@ static bool its_readl(GICv3ITSState *s, hwaddr offset, break; case GITS_IDREGS ... 
GITS_IDREGS + 0x2f: /* ID registers */ - *data = gicv3_idreg(offset - GITS_IDREGS); + *data = gicv3_idreg(s->gicv3, offset - GITS_IDREGS, GICV3_PIDR0_ITS); break; case GITS_TYPER: *data = extract64(s->typer, 0, 32); @@ -1395,6 +1934,8 @@ static void gicv3_arm_its_realize(DeviceState *dev, Error **errp) } } + gicv3_add_its(s->gicv3, dev); + gicv3_its_init_mmio(s, &gicv3_its_control_ops, &gicv3_its_translation_ops); /* set the ITS default features supported */ @@ -1405,6 +1946,11 @@ static void gicv3_arm_its_realize(DeviceState *dev, Error **errp) s->typer = FIELD_DP64(s->typer, GITS_TYPER, DEVBITS, ITS_DEVBITS); s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIL, 1); s->typer = FIELD_DP64(s->typer, GITS_TYPER, CIDBITS, ITS_CIDBITS); + if (s->gicv3->revision >= 4) { + /* Our VMOVP handles cross-ITS synchronization itself */ + s->typer = FIELD_DP64(s->typer, GITS_TYPER, VMOVP, 1); + s->typer = FIELD_DP64(s->typer, GITS_TYPER, VIRTUAL, 1); + } } static void gicv3_its_reset(DeviceState *dev) @@ -1420,6 +1966,7 @@ static void gicv3_its_reset(DeviceState *dev) /* * setting GITS_BASER0.Type = 0b001 (Device) * GITS_BASER1.Type = 0b100 (Collection Table) + * GITS_BASER2.Type = 0b010 (vPE) for GICv4 and later * GITS_BASER<n>.Type,where n = 3 to 7 are 0b00 (Unimplemented) * GITS_BASER<0,1>.Page_Size = 64KB * and default translation table entry size to 16 bytes @@ -1437,6 +1984,15 @@ static void gicv3_its_reset(DeviceState *dev) GITS_BASER_PAGESIZE_64K); s->baser[1] = FIELD_DP64(s->baser[1], GITS_BASER, ENTRYSIZE, GITS_CTE_SIZE - 1); + + if (its_feature_virtual(s)) { + s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, TYPE, + GITS_BASER_TYPE_VPE); + s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, PAGESIZE, + GITS_BASER_PAGESIZE_64K); + s->baser[2] = FIELD_DP64(s->baser[2], GITS_BASER, ENTRYSIZE, + GITS_VPE_SIZE - 1); + } } static void gicv3_its_post_load(GICv3ITSState *s) diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c index 0b4cbed28b..529c7bd494 100644 --- a/hw/intc/arm_gicv3_its_kvm.c +++ b/hw/intc/arm_gicv3_its_kvm.c @@ -106,6 +106,8 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp) kvm_arm_register_device(&s->iomem_its_cntrl, -1, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_ITS_ADDR_TYPE, s->dev_fd, 0); + gicv3_add_its(s->gicv3, dev); + gicv3_its_init_mmio(s, NULL, NULL); if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_ITS_REGS, diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c index 5ec5ff9ef6..06f5aceee5 100644 --- a/hw/intc/arm_gicv3_kvm.c +++ b/hw/intc/arm_gicv3_kvm.c @@ -781,6 +781,11 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) return; } + if (s->revision != 3) { + error_setg(errp, "unsupported GIC revision %d for in-kernel GIC", + s->revision); + } + if (s->security_extn) { error_setg(errp, "the in-kernel VGICv3 does not implement the " "security extensions"); diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c index 412a04f59c..c3d4cdd66b 100644 --- a/hw/intc/arm_gicv3_redist.c +++ b/hw/intc/arm_gicv3_redist.c @@ -60,6 +60,132 @@ static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs, return reg; } +static bool vcpu_resident(GICv3CPUState *cs, uint64_t vptaddr) +{ + /* + * Return true if a vCPU is resident, which is defined by + * whether the GICR_VPENDBASER register is marked VALID and + * has the right virtual pending table address. 
+ */ + if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) { + return false; + } + return vptaddr == (cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK); +} + +/** + * update_for_one_lpi: Update pending information if this LPI is better + * + * @cs: GICv3CPUState + * @irq: interrupt to look up in the LPI Configuration table + * @ctbase: physical address of the LPI Configuration table to use + * @ds: true if priority value should not be shifted + * @hpp: points to pending information to update + * + * Look up @irq in the Configuration table specified by @ctbase + * to see if it is enabled and what its priority is. If it is an + * enabled interrupt with a higher priority than that currently + * recorded in @hpp, update @hpp. + */ +static void update_for_one_lpi(GICv3CPUState *cs, int irq, + uint64_t ctbase, bool ds, PendingIrq *hpp) +{ + uint8_t lpite; + uint8_t prio; + + address_space_read(&cs->gic->dma_as, + ctbase + ((irq - GICV3_LPI_INTID_START) * sizeof(lpite)), + MEMTXATTRS_UNSPECIFIED, &lpite, sizeof(lpite)); + + if (!(lpite & LPI_CTE_ENABLED)) { + return; + } + + if (ds) { + prio = lpite & LPI_PRIORITY_MASK; + } else { + prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80; + } + + if ((prio < hpp->prio) || + ((prio == hpp->prio) && (irq <= hpp->irq))) { + hpp->irq = irq; + hpp->prio = prio; + /* LPIs and vLPIs are always non-secure Grp1 interrupts */ + hpp->grp = GICV3_G1NS; + } +} + +/** + * update_for_all_lpis: Fully scan LPI tables and find best pending LPI + * + * @cs: GICv3CPUState + * @ptbase: physical address of LPI Pending table + * @ctbase: physical address of LPI Configuration table + * @ptsizebits: size of tables, specified as number of interrupt ID bits minus 1 + * @ds: true if priority value should not be shifted + * @hpp: points to pending information to set + * + * Recalculate the highest priority pending enabled LPI from scratch, + * and set @hpp accordingly. + * + * We scan the LPI pending table @ptbase; for each pending LPI, we read the + * corresponding entry in the LPI configuration table @ctbase to extract + * the priority and enabled information. + * + * We take @ptsizebits in the form idbits-1 because this is the way that + * LPI table sizes are architecturally specified in GICR_PROPBASER.IDBits + * and in the VMAPP command's VPT_size field. + */ +static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase, + uint64_t ctbase, unsigned ptsizebits, + bool ds, PendingIrq *hpp) +{ + AddressSpace *as = &cs->gic->dma_as; + uint8_t pend; + uint32_t pendt_size = (1ULL << (ptsizebits + 1)); + int i, bit; + + hpp->prio = 0xff; + + for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) { + address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1); + while (pend) { + bit = ctz32(pend); + update_for_one_lpi(cs, i * 8 + bit, ctbase, ds, hpp); + pend &= ~(1 << bit); + } + } +} + +/** + * set_pending_table_bit: Set or clear pending bit for an LPI + * + * @cs: GICv3CPUState + * @ptbase: physical address of LPI Pending table + * @irq: LPI to change pending state for + * @level: false to clear pending state, true to set + * + * Returns true if we needed to do something, false if the pending bit + * was already at @level.
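+ * As a rough worked example: for irq 8192 (the first LPI) the bit
+ * lives at byte ptbase + 8192 / 8 = ptbase + 1024, bit position
+ * 8192 % 8 = 0; the byte is read, the bit tested, and the byte only
+ * written back when the bit actually changes.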
+ */ +static bool set_pending_table_bit(GICv3CPUState *cs, uint64_t ptbase, + int irq, bool level) +{ + AddressSpace *as = &cs->gic->dma_as; + uint64_t addr = ptbase + irq / 8; + uint8_t pend; + + address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1); + if (extract32(pend, irq % 8, 1) == level) { + /* Bit already at requested state, no action required */ + return false; + } + pend = deposit32(pend, irq % 8, 1, level ? 1 : 0); + address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, &pend, 1); + return true; +} + static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq) { @@ -100,6 +226,87 @@ static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq, cs->gicr_ipriorityr[irq] = value; } +static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs) +{ + uint64_t ptbase, ctbase, idbits; + + if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) { + cs->hppvlpi.prio = 0xff; + return; + } + + ptbase = cs->gicr_vpendbaser & R_GICR_VPENDBASER_PHYADDR_MASK; + ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK; + idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS); + + update_for_all_lpis(cs, ptbase, ctbase, idbits, true, &cs->hppvlpi); +} + +static void gicv3_redist_update_vlpi(GICv3CPUState *cs) +{ + gicv3_redist_update_vlpi_only(cs); + gicv3_cpuif_virt_irq_fiq_update(cs); +} + +static void gicr_write_vpendbaser(GICv3CPUState *cs, uint64_t newval) +{ + /* Write @newval to GICR_VPENDBASER, handling its effects */ + bool oldvalid = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID); + bool newvalid = FIELD_EX64(newval, GICR_VPENDBASER, VALID); + bool pendinglast; + + /* + * The DIRTY bit is read-only and for us is always zero; + * other fields are writeable. + */ + newval &= R_GICR_VPENDBASER_INNERCACHE_MASK | + R_GICR_VPENDBASER_SHAREABILITY_MASK | + R_GICR_VPENDBASER_PHYADDR_MASK | + R_GICR_VPENDBASER_OUTERCACHE_MASK | + R_GICR_VPENDBASER_PENDINGLAST_MASK | + R_GICR_VPENDBASER_IDAI_MASK | + R_GICR_VPENDBASER_VALID_MASK; + + if (oldvalid && newvalid) { + /* + * Changing other fields while VALID is 1 is UNPREDICTABLE; + * we choose to log and ignore the write. + */ + if (cs->gicr_vpendbaser ^ newval) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Changing GICR_VPENDBASER when VALID=1 " + "is UNPREDICTABLE\n", __func__); + } + return; + } + if (!oldvalid && !newvalid) { + cs->gicr_vpendbaser = newval; + return; + } + + if (newvalid) { + /* + * Valid going from 0 to 1: update hppvlpi from tables. + * If IDAI is 0 we are allowed to use the info we cached in + * the IMPDEF area of the table. + * PendingLast is RES1 when we make this transition. + */ + pendinglast = true; + } else { + /* + * Valid going from 1 to 0: + * Set PendingLast if there was a pending enabled interrupt + * for the vPE that was just descheduled. + * If we cache info in the IMPDEF area, write it out here. + */ + pendinglast = cs->hppvlpi.prio != 0xff; + } + + newval = FIELD_DP64(newval, GICR_VPENDBASER, PENDINGLAST, pendinglast); + cs->gicr_vpendbaser = newval; + gicv3_redist_update_vlpi(cs); +} + static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset, uint64_t *data, MemTxAttrs attrs) { @@ -234,7 +441,24 @@ static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset, *data = cs->gicr_nsacr; return MEMTX_OK; case GICR_IDREGS ... GICR_IDREGS + 0x2f: - *data = gicv3_idreg(offset - GICR_IDREGS); + *data = gicv3_idreg(cs->gic, offset - GICR_IDREGS, GICV3_PIDR0_REDIST); + return MEMTX_OK; + /* + * VLPI frame registers. 
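+ * (Per the register definitions added to gicv3_internal.h below,
+ * these live in the third 64KB frame: GICR_VLPI_OFFSET is 0x20000,
+ * so GICR_VPROPBASER is RD_base + 0x20070 and GICR_VPENDBASER is
+ * RD_base + 0x20078.)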
We don't need a version check for + * VPROPBASER and VPENDBASER because gicv3_redist_size() will + * prevent pre-v4 GIC from passing us offsets this high. + */ + case GICR_VPROPBASER: + *data = extract64(cs->gicr_vpropbaser, 0, 32); + return MEMTX_OK; + case GICR_VPROPBASER + 4: + *data = extract64(cs->gicr_vpropbaser, 32, 32); + return MEMTX_OK; + case GICR_VPENDBASER: + *data = extract64(cs->gicr_vpendbaser, 0, 32); + return MEMTX_OK; + case GICR_VPENDBASER + 4: + *data = extract64(cs->gicr_vpendbaser, 32, 32); return MEMTX_OK; default: return MEMTX_ERROR; @@ -379,6 +603,23 @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset, "%s: invalid guest write to RO register at offset " TARGET_FMT_plx "\n", __func__, offset); return MEMTX_OK; + /* + * VLPI frame registers. We don't need a version check for + * VPROPBASER and VPENDBASER because gicv3_redist_size() will + * prevent pre-v4 GIC from passing us offsets this high. + */ + case GICR_VPROPBASER: + cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 0, 32, value); + return MEMTX_OK; + case GICR_VPROPBASER + 4: + cs->gicr_vpropbaser = deposit64(cs->gicr_vpropbaser, 32, 32, value); + return MEMTX_OK; + case GICR_VPENDBASER: + gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 0, 32, value)); + return MEMTX_OK; + case GICR_VPENDBASER + 4: + gicr_write_vpendbaser(cs, deposit64(cs->gicr_vpendbaser, 32, 32, value)); + return MEMTX_OK; default: return MEMTX_ERROR; } @@ -397,6 +638,17 @@ static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset, case GICR_PENDBASER: *data = cs->gicr_pendbaser; return MEMTX_OK; + /* + * VLPI frame registers. We don't need a version check for + * VPROPBASER and VPENDBASER because gicv3_redist_size() will + * prevent pre-v4 GIC from passing us offsets this high. + */ + case GICR_VPROPBASER: + *data = cs->gicr_vpropbaser; + return MEMTX_OK; + case GICR_VPENDBASER: + *data = cs->gicr_vpendbaser; + return MEMTX_OK; default: return MEMTX_ERROR; } @@ -418,6 +670,17 @@ static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset, "%s: invalid guest write to RO register at offset " TARGET_FMT_plx "\n", __func__, offset); return MEMTX_OK; + /* + * VLPI frame registers. We don't need a version check for + * VPROPBASER and VPENDBASER because gicv3_redist_size() will + * prevent pre-v4 GIC from passing us offsets this high. + */ + case GICR_VPROPBASER: + cs->gicr_vpropbaser = value; + return MEMTX_OK; + case GICR_VPENDBASER: + gicr_write_vpendbaser(cs, value); + return MEMTX_OK; default: return MEMTX_ERROR; } @@ -442,8 +705,8 @@ MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data, * in the memory map); if so then the GIC has multiple MemoryRegions * for the redistributors. */ - cpuidx = region->cpuidx + offset / GICV3_REDIST_SIZE; - offset %= GICV3_REDIST_SIZE; + cpuidx = region->cpuidx + offset / gicv3_redist_size(s); + offset %= gicv3_redist_size(s); cs = &s->cpu[cpuidx]; @@ -501,8 +764,8 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, * in the memory map); if so then the GIC has multiple MemoryRegions * for the redistributors. 
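+ * (Concretely, assuming the GICv4 frame size of 0x40000 bytes per
+ * CPU: an access at region offset 0x80070 decodes to CPU
+ * region->cpuidx + 2, register offset 0x70 within that CPU's frame,
+ * which is the divide/modulo split computed just below.)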
*/ - cpuidx = region->cpuidx + offset / GICV3_REDIST_SIZE; - offset %= GICV3_REDIST_SIZE; + cpuidx = region->cpuidx + offset / gicv3_redist_size(s); + offset %= gicv3_redist_size(s); cs = &s->cpu[cpuidx]; @@ -542,34 +805,11 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, static void gicv3_redist_check_lpi_priority(GICv3CPUState *cs, int irq) { - AddressSpace *as = &cs->gic->dma_as; - uint64_t lpict_baddr; - uint8_t lpite; - uint8_t prio; - - lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK; + uint64_t lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK; - address_space_read(as, lpict_baddr + ((irq - GICV3_LPI_INTID_START) * - sizeof(lpite)), MEMTXATTRS_UNSPECIFIED, &lpite, - sizeof(lpite)); - - if (!(lpite & LPI_CTE_ENABLED)) { - return; - } - - if (cs->gic->gicd_ctlr & GICD_CTLR_DS) { - prio = lpite & LPI_PRIORITY_MASK; - } else { - prio = ((lpite & LPI_PRIORITY_MASK) >> 1) | 0x80; - } - - if ((prio < cs->hpplpi.prio) || - ((prio == cs->hpplpi.prio) && (irq <= cs->hpplpi.irq))) { - cs->hpplpi.irq = irq; - cs->hpplpi.prio = prio; - /* LPIs are always non-secure Grp1 interrupts */ - cs->hpplpi.grp = GICV3_G1NS; - } + update_for_one_lpi(cs, irq, lpict_baddr, + cs->gic->gicd_ctlr & GICD_CTLR_DS, + &cs->hpplpi); } void gicv3_redist_update_lpi_only(GICv3CPUState *cs) @@ -581,11 +821,7 @@ void gicv3_redist_update_lpi_only(GICv3CPUState *cs) * priority is lower than the last computed high priority lpi interrupt. * If yes, replace current LPI as the new high priority lpi interrupt. */ - AddressSpace *as = &cs->gic->dma_as; - uint64_t lpipt_baddr; - uint32_t pendt_size = 0; - uint8_t pend; - int i, bit; + uint64_t lpipt_baddr, lpict_baddr; uint64_t idbits; idbits = MIN(FIELD_EX64(cs->gicr_propbaser, GICR_PROPBASER, IDBITS), @@ -595,23 +831,11 @@ void gicv3_redist_update_lpi_only(GICv3CPUState *cs) return; } - cs->hpplpi.prio = 0xff; - lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; + lpict_baddr = cs->gicr_propbaser & R_GICR_PROPBASER_PHYADDR_MASK; - /* Determine the highest priority pending interrupt among LPIs */ - pendt_size = (1ULL << (idbits + 1)); - - for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) { - address_space_read(as, lpipt_baddr + i, MEMTXATTRS_UNSPECIFIED, &pend, - sizeof(pend)); - - while (pend) { - bit = ctz32(pend); - gicv3_redist_check_lpi_priority(cs, i * 8 + bit); - pend &= ~(1 << bit); - } - } + update_for_all_lpis(cs, lpipt_baddr, lpict_baddr, idbits, + cs->gic->gicd_ctlr & GICD_CTLR_DS, &cs->hpplpi); } void gicv3_redist_update_lpi(GICv3CPUState *cs) @@ -626,30 +850,13 @@ void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level) * This function updates the pending bit in lpi pending table for * the irq being activated or deactivated. */ - AddressSpace *as = &cs->gic->dma_as; uint64_t lpipt_baddr; - bool ispend = false; - uint8_t pend; - /* - * get the bit value corresponding to this irq in the - * lpi pending table - */ lpipt_baddr = cs->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; - - address_space_read(as, lpipt_baddr + ((irq / 8) * sizeof(pend)), - MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend)); - - ispend = extract32(pend, irq % 8, 1); - - /* no change in the value of pending bit, return */ - if (ispend == level) { + if (!set_pending_table_bit(cs, lpipt_baddr, irq, level)) { + /* no change in the value of pending bit, return */ return; } - pend = deposit32(pend, irq % 8, 1, level ? 
1 : 0); - - address_space_write(as, lpipt_baddr + ((irq / 8) * sizeof(pend)), - MEMTXATTRS_UNSPECIFIED, &pend, sizeof(pend)); /* * check if this LPI is better than the current hpplpi, if yes @@ -681,6 +888,17 @@ void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level) gicv3_redist_lpi_pending(cs, irq, level); } +void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq) +{ + /* + * The only cached information for LPIs we have is the HPPLPI. + * We could be cleverer about identifying when we don't need + * to do a full rescan of the pending table, but until we find + * this is a performance issue, just always recalculate. + */ + gicv3_redist_update_lpi(cs); +} + void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq) { /* @@ -691,11 +909,9 @@ void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq) * we choose to NOP. If LPIs are disabled on source there's nothing * to be transferred anyway. */ - AddressSpace *as = &src->gic->dma_as; uint64_t idbits; uint32_t pendt_size; uint64_t src_baddr; - uint8_t src_pend; if (!(src->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) || !(dest->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) { @@ -714,15 +930,10 @@ void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq) src_baddr = src->gicr_pendbaser & R_GICR_PENDBASER_PHYADDR_MASK; - address_space_read(as, src_baddr + (irq / 8), - MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend)); - if (!extract32(src_pend, irq % 8, 1)) { + if (!set_pending_table_bit(src, src_baddr, irq, 0)) { /* Not pending on source, nothing to do */ return; } - src_pend &= ~(1 << (irq % 8)); - address_space_write(as, src_baddr + (irq / 8), - MEMTXATTRS_UNSPECIFIED, &src_pend, sizeof(src_pend)); if (irq == src->hpplpi.irq) { /* * We just made this LPI not-pending so only need to update @@ -788,6 +999,117 @@ void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest) gicv3_redist_update_lpi(dest); } +void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level) +{ + /* + * Change the pending state of the specified vLPI. + * Unlike gicv3_redist_process_vlpi(), we know here that the + * vCPU is definitely resident on this redistributor, and that + * the irq is in range. 
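+ * (This is the cpuif-facing path; the ITS-facing path is
+ * gicv3_redist_process_vlpi() below, which must additionally cope
+ * with a non-resident vCPU and an out-of-range irq.)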
+ */ + uint64_t vptbase, ctbase; + + vptbase = FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, PHYADDR) << 16; + + if (set_pending_table_bit(cs, vptbase, irq, level)) { + if (level) { + /* Check whether this vLPI is now the best */ + ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK; + update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi); + gicv3_cpuif_virt_irq_fiq_update(cs); + } else { + /* Only need to recalculate if this was previously the best vLPI */ + if (irq == cs->hppvlpi.irq) { + gicv3_redist_update_vlpi(cs); + } + } + } +} + +void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr, + int doorbell, int level) +{ + bool bit_changed; + bool resident = vcpu_resident(cs, vptaddr); + uint64_t ctbase; + + if (resident) { + uint32_t idbits = FIELD_EX64(cs->gicr_vpropbaser, GICR_VPROPBASER, IDBITS); + if (irq >= (1ULL << (idbits + 1))) { + return; + } + } + + bit_changed = set_pending_table_bit(cs, vptaddr, irq, level); + if (resident && bit_changed) { + if (level) { + /* Check whether this vLPI is now the best */ + ctbase = cs->gicr_vpropbaser & R_GICR_VPROPBASER_PHYADDR_MASK; + update_for_one_lpi(cs, irq, ctbase, true, &cs->hppvlpi); + gicv3_cpuif_virt_irq_fiq_update(cs); + } else { + /* Only need to recalculate if this was previously the best vLPI */ + if (irq == cs->hppvlpi.irq) { + gicv3_redist_update_vlpi(cs); + } + } + } + + if (!resident && level && doorbell != INTID_SPURIOUS && + (cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) { + /* vCPU is not currently resident: ring the doorbell */ + gicv3_redist_process_lpi(cs, doorbell, 1); + } +} + +void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr, + GICv3CPUState *dest, uint64_t dest_vptaddr, + int irq, int doorbell) +{ + /* + * Move the specified vLPI's pending state from the source redistributor + * to the destination. + */ + if (!set_pending_table_bit(src, src_vptaddr, irq, 0)) { + /* Not pending on source, nothing to do */ + return; + } + if (vcpu_resident(src, src_vptaddr) && irq == src->hppvlpi.irq) { + /* + * Update src's cached highest-priority pending vLPI if we just made + * it not-pending + */ + gicv3_redist_update_vlpi(src); + } + /* + * Mark the vLPI pending on the destination (ringing the doorbell + * if the vCPU isn't resident) + */ + gicv3_redist_process_vlpi(dest, irq, dest_vptaddr, doorbell, 1); +} + +void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr) +{ + if (!vcpu_resident(cs, vptaddr)) { + /* We don't have anything cached if the vCPU isn't resident */ + return; + } + + /* Otherwise, our only cached information is the HPPVLPI info */ + gicv3_redist_update_vlpi(cs); +} + +void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr) +{ + /* + * The only cached information for vLPIs we have is the HPPVLPI. + * We could be cleverer about identifying when we don't need + * to do a full rescan of the pending table, but until we find + * this is a performance issue, just always recalculate.
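+ * ("Recalculate" here means a full rescan of the vLPI pending and
+ * configuration tables; since that is also precisely what VINVALL
+ * needs, the body simply delegates to gicv3_redist_vinvall().)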
+ */ + gicv3_redist_vinvall(cs, vptaddr); +} + void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level) { /* Update redistributor state for a change in an external PPI input line */ diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h index 2bf1baef04..29d5cdc1b6 100644 --- a/hw/intc/gicv3_internal.h +++ b/hw/intc/gicv3_internal.h @@ -77,6 +77,7 @@ * Redistributor frame offsets from RD_base */ #define GICR_SGI_OFFSET 0x10000 +#define GICR_VLPI_OFFSET 0x20000 /* * Redistributor registers, offsets from RD_base @@ -109,6 +110,10 @@ #define GICR_IGRPMODR0 (GICR_SGI_OFFSET + 0x0D00) #define GICR_NSACR (GICR_SGI_OFFSET + 0x0E00) +/* VLPI redistributor registers, offsets from VLPI_base */ +#define GICR_VPROPBASER (GICR_VLPI_OFFSET + 0x70) +#define GICR_VPENDBASER (GICR_VLPI_OFFSET + 0x78) + #define GICR_CTLR_ENABLE_LPIS (1U << 0) #define GICR_CTLR_CES (1U << 1) #define GICR_CTLR_RWP (1U << 3) @@ -143,6 +148,22 @@ FIELD(GICR_PENDBASER, PTZ, 62, 1) #define GICR_PROPBASER_IDBITS_THRESHOLD 0xd +/* These are the GICv4 VPROPBASER and VPENDBASER layouts; v4.1 is different */ +FIELD(GICR_VPROPBASER, IDBITS, 0, 5) +FIELD(GICR_VPROPBASER, INNERCACHE, 7, 3) +FIELD(GICR_VPROPBASER, SHAREABILITY, 10, 2) +FIELD(GICR_VPROPBASER, PHYADDR, 12, 40) +FIELD(GICR_VPROPBASER, OUTERCACHE, 56, 3) + +FIELD(GICR_VPENDBASER, INNERCACHE, 7, 3) +FIELD(GICR_VPENDBASER, SHAREABILITY, 10, 2) +FIELD(GICR_VPENDBASER, PHYADDR, 16, 36) +FIELD(GICR_VPENDBASER, OUTERCACHE, 56, 3) +FIELD(GICR_VPENDBASER, DIRTY, 60, 1) +FIELD(GICR_VPENDBASER, PENDINGLAST, 61, 1) +FIELD(GICR_VPENDBASER, IDAI, 62, 1) +FIELD(GICR_VPENDBASER, VALID, 63, 1) + #define ICC_CTLR_EL1_CBPR (1U << 0) #define ICC_CTLR_EL1_EOIMODE (1U << 1) #define ICC_CTLR_EL1_PMHE (1U << 6) @@ -280,6 +301,7 @@ FIELD(GITS_CTLR, ENABLED, 0, 1) FIELD(GITS_CTLR, QUIESCENT, 31, 1) FIELD(GITS_TYPER, PHYSICAL, 0, 1) +FIELD(GITS_TYPER, VIRTUAL, 1, 1) FIELD(GITS_TYPER, ITT_ENTRY_SIZE, 4, 4) FIELD(GITS_TYPER, IDBITS, 8, 5) FIELD(GITS_TYPER, DEVBITS, 13, 5) @@ -287,6 +309,7 @@ FIELD(GITS_TYPER, SEIS, 18, 1) FIELD(GITS_TYPER, PTA, 19, 1) FIELD(GITS_TYPER, CIDBITS, 32, 4) FIELD(GITS_TYPER, CIL, 36, 1) +FIELD(GITS_TYPER, VMOVP, 37, 1) #define GITS_IDREGS 0xFFD0 @@ -298,6 +321,7 @@ FIELD(GITS_TYPER, CIL, 36, 1) #define GITS_BASER_PAGESIZE_64K 2 #define GITS_BASER_TYPE_DEVICE 1ULL +#define GITS_BASER_TYPE_VPE 2ULL #define GITS_BASER_TYPE_COLLECTION 4ULL #define GITS_PAGE_SIZE_4K 0x1000 @@ -327,6 +351,13 @@ FIELD(GITS_TYPER, CIL, 36, 1) #define GITS_CMD_INVALL 0x0D #define GITS_CMD_MOVALL 0x0E #define GITS_CMD_DISCARD 0x0F +#define GITS_CMD_VMOVI 0x21 +#define GITS_CMD_VMOVP 0x22 +#define GITS_CMD_VSYNC 0x25 +#define GITS_CMD_VMAPP 0x29 +#define GITS_CMD_VMAPTI 0x2A +#define GITS_CMD_VMAPI 0x2B +#define GITS_CMD_VINVALL 0x2D /* MAPC command fields */ #define ICID_LENGTH 16 @@ -366,6 +397,46 @@ FIELD(MOVI_0, DEVICEID, 32, 32) FIELD(MOVI_1, EVENTID, 0, 32) FIELD(MOVI_2, ICID, 0, 16) +/* INV command fields */ +FIELD(INV_0, DEVICEID, 32, 32) +FIELD(INV_1, EVENTID, 0, 32) + +/* VMAPI, VMAPTI command fields */ +FIELD(VMAPTI_0, DEVICEID, 32, 32) +FIELD(VMAPTI_1, EVENTID, 0, 32) +FIELD(VMAPTI_1, VPEID, 32, 16) +FIELD(VMAPTI_2, VINTID, 0, 32) /* VMAPTI only */ +FIELD(VMAPTI_2, DOORBELL, 32, 32) + +/* VMAPP command fields */ +FIELD(VMAPP_0, ALLOC, 8, 1) /* GICv4.1 only */ +FIELD(VMAPP_0, PTZ, 9, 1) /* GICv4.1 only */ +FIELD(VMAPP_0, VCONFADDR, 16, 36) /* GICv4.1 only */ +FIELD(VMAPP_1, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */ +FIELD(VMAPP_1, VPEID, 32, 16) +FIELD(VMAPP_2, RDBASE, 16, 
36) +FIELD(VMAPP_2, V, 63, 1) +FIELD(VMAPP_3, VPTSIZE, 0, 8) /* For GICv4.0, bits [7:6] are RES0 */ +FIELD(VMAPP_3, VPTADDR, 16, 36) + +/* VMOVP command fields */ +FIELD(VMOVP_0, SEQNUM, 32, 16) /* not used for GITS_TYPER.VMOVP == 1 */ +FIELD(VMOVP_1, ITSLIST, 0, 16) /* not used for GITS_TYPER.VMOVP == 1 */ +FIELD(VMOVP_1, VPEID, 32, 16) +FIELD(VMOVP_2, RDBASE, 16, 36) +FIELD(VMOVP_2, DB, 63, 1) /* GICv4.1 only */ +FIELD(VMOVP_3, DEFAULT_DOORBELL, 0, 32) /* GICv4.1 only */ + +/* VMOVI command fields */ +FIELD(VMOVI_0, DEVICEID, 32, 32) +FIELD(VMOVI_1, EVENTID, 0, 32) +FIELD(VMOVI_1, VPEID, 32, 16) +FIELD(VMOVI_2, D, 0, 1) +FIELD(VMOVI_2, DOORBELL, 32, 32) + +/* VINVALL command fields */ +FIELD(VINVALL_1, VPEID, 32, 16) + /* * 12 bytes Interrupt translation Table Entry size * as per Table 5.3 in GICv3 spec @@ -419,6 +490,20 @@ FIELD(DTE, ITTADDR, 6, 44) FIELD(CTE, VALID, 0, 1) FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH) +/* + * 8 bytes VPE table entry size: + * Valid = 1 bit, VPTsize = 5 bits, VPTaddr = 36 bits, RDbase = 16 bits + * + * Field sizes for Valid and size are mandated; field sizes for RDbase + * and VPT_addr are IMPDEF. + */ +#define GITS_VPE_SIZE 0x8ULL + +FIELD(VTE, VALID, 0, 1) +FIELD(VTE, VPTSIZE, 1, 5) +FIELD(VTE, VPTADDR, 6, 36) +FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH) + /* Special interrupt IDs */ #define INTID_SECURE 1020 #define INTID_NONSECURE 1021 @@ -427,6 +512,27 @@ FIELD(CTE, RDBASE, 1, RDBASE_PROCNUM_LENGTH) /* Functions internal to the emulated GICv3 */ /** + * gicv3_redist_size: + * @s: GICv3State + * + * Return the size of the redistributor register frame in bytes + * (which depends on what GIC version this is) + */ +static inline int gicv3_redist_size(GICv3State *s) +{ + /* + * Redistributor size is controlled by the redistributor GICR_TYPER.VLPIS. + * It's the same for every redistributor in the GIC, so arbitrarily + * use the register field in the first one. + */ + if (s->cpu[0].gicr_typer & GICR_TYPER_VLPIS) { + return GICV4_REDIST_SIZE; + } else { + return GICV3_REDIST_SIZE; + } +} + +/** * gicv3_intid_is_special: * @intid: interrupt ID * @@ -490,6 +596,36 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, void gicv3_dist_set_irq(GICv3State *s, int irq, int level); void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level); void gicv3_redist_process_lpi(GICv3CPUState *cs, int irq, int level); +/** + * gicv3_redist_process_vlpi: + * @cs: GICv3CPUState + * @irq: (virtual) interrupt number + * @vptaddr: (guest) address of VLPI table + * @doorbell: doorbell (physical) interrupt number (1023 for "no doorbell") + * @level: level to set @irq to + * + * Process a virtual LPI being directly injected by the ITS. This function + * will update the VLPI table specified by @vptaddr and @vptsize. If the + * vCPU corresponding to that VLPI table is currently running on + * the CPU associated with this redistributor, directly inject the VLPI + * @irq. If the vCPU is not running on this CPU, raise the doorbell + * interrupt instead. + */ +void gicv3_redist_process_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr, + int doorbell, int level); +/** + * gicv3_redist_vlpi_pending: + * @cs: GICv3CPUState + * @irq: (virtual) interrupt number + * @level: level to set @irq to + * + * Set/clear the pending status of a virtual LPI in the vLPI table + * that this redistributor is currently using. 
(The difference between + * this and gicv3_redist_process_vlpi() is that this is called from + * the cpuif and does not need to do the not-running-on-this-vcpu checks.) + */ +void gicv3_redist_vlpi_pending(GICv3CPUState *cs, int irq, int level); + void gicv3_redist_lpi_pending(GICv3CPUState *cs, int irq, int level); /** * gicv3_redist_update_lpi: @@ -510,6 +646,23 @@ void gicv3_redist_update_lpi(GICv3CPUState *cs); */ void gicv3_redist_update_lpi_only(GICv3CPUState *cs); /** + * gicv3_redist_inv_lpi: + * @cs: GICv3CPUState + * @irq: LPI to invalidate cached information for + * + * Forget or update any cached information associated with this LPI. + */ +void gicv3_redist_inv_lpi(GICv3CPUState *cs, int irq); +/** + * gicv3_redist_inv_vlpi: + * @cs: GICv3CPUState + * @irq: vLPI to invalidate cached information for + * @vptaddr: (guest) address of vLPI table + * + * Forget or update any cached information associated with this vLPI. + */ +void gicv3_redist_inv_vlpi(GICv3CPUState *cs, int irq, uint64_t vptaddr); +/** * gicv3_redist_mov_lpi: * @src: source redistributor * @dest: destination redistributor @@ -529,6 +682,30 @@ void gicv3_redist_mov_lpi(GICv3CPUState *src, GICv3CPUState *dest, int irq); * by the ITS MOVALL command. */ void gicv3_redist_movall_lpis(GICv3CPUState *src, GICv3CPUState *dest); +/** + * gicv3_redist_mov_vlpi: + * @src: source redistributor + * @src_vptaddr: (guest) address of source VLPI table + * @dest: destination redistributor + * @dest_vptaddr: (guest) address of destination VLPI table + * @irq: VLPI to update + * @doorbell: doorbell for destination (1023 for "no doorbell") + * + * Move the pending state of the specified VLPI from @src to @dest, + * as required by the ITS VMOVI command. + */ +void gicv3_redist_mov_vlpi(GICv3CPUState *src, uint64_t src_vptaddr, + GICv3CPUState *dest, uint64_t dest_vptaddr, + int irq, int doorbell); +/** + * gicv3_redist_vinvall: + * @cs: GICv3CPUState + * @vptaddr: address of VLPI pending table + * + * On redistributor @cs, invalidate all cached information associated + * with the vCPU defined by @vptaddr. + */ +void gicv3_redist_vinvall(GICv3CPUState *cs, uint64_t vptaddr); void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns); void gicv3_init_cpuif(GICv3State *s); @@ -544,6 +721,17 @@ void gicv3_init_cpuif(GICv3State *s); */ void gicv3_cpuif_update(GICv3CPUState *cs); +/* + * gicv3_cpuif_virt_irq_fiq_update: + * @cs: GICv3CPUState for the CPU to update + * + * Recalculate whether to assert the virtual IRQ or FIQ lines after + * a change to the current highest priority pending virtual interrupt. + * Note that this does not recalculate and change the maintenance + * interrupt status (for that, see gicv3_cpuif_virt_update()). + */ +void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs); + static inline uint32_t gicv3_iidr(void) { /* Return the Implementer Identification Register value @@ -555,17 +743,34 @@ static inline uint32_t gicv3_iidr(void) return 0x43b; } -static inline uint32_t gicv3_idreg(int regoffset) +/* CoreSight PIDR0 values for ARM GICv3 implementations */ +#define GICV3_PIDR0_DIST 0x92 +#define GICV3_PIDR0_REDIST 0x93 +#define GICV3_PIDR0_ITS 0x94 + +static inline uint32_t gicv3_idreg(GICv3State *s, int regoffset, uint8_t pidr0) { /* Return the value of the CoreSight ID register at the specified * offset from the first ID register (as found in the distributor * and redistributor register banks). - * These values indicate an ARM implementation of a GICv3. 
+ * These values indicate an ARM implementation of a GICv3 or v4. */ static const uint8_t gicd_ids[] = { - 0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x3B, 0x00, 0x0D, 0xF0, 0x05, 0xB1 + 0x44, 0x00, 0x00, 0x00, 0x92, 0xB4, 0x0B, 0x00, 0x0D, 0xF0, 0x05, 0xB1 }; - return gicd_ids[regoffset / 4]; + uint32_t id; + + regoffset /= 4; + + if (regoffset == 4) { + return pidr0; + } + id = gicd_ids[regoffset]; + if (regoffset == 6) { + /* PIDR2 bits [7:4] are the GIC architecture revision */ + id |= s->revision << 4; + } + return id; } /** diff --git a/hw/intc/meson.build b/hw/intc/meson.build index d6d012fb26..8b35139f82 100644 --- a/hw/intc/meson.build +++ b/hw/intc/meson.build @@ -62,3 +62,4 @@ specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XIVE'], if_true: files('spapr_xive_kvm.c')) specific_ss.add(when: 'CONFIG_GOLDFISH_PIC', if_true: files('goldfish_pic.c')) specific_ss.add(when: 'CONFIG_M68K_IRQC', if_true: files('m68k_irqc.c')) +specific_ss.add(when: 'CONFIG_NIOS2_VIC', if_true: files('nios2_vic.c')) diff --git a/hw/intc/nios2_vic.c b/hw/intc/nios2_vic.c new file mode 100644 index 0000000000..cf63212a88 --- /dev/null +++ b/hw/intc/nios2_vic.c @@ -0,0 +1,313 @@ +/* + * Vectored Interrupt Controller for nios2 processor + * + * Copyright (c) 2022 Neuroblade + * + * Interface: + * QOM property "cpu": link to the Nios2 CPU (must be set) + * Unnamed GPIO inputs 0..NIOS2_VIC_MAX_IRQ-1: input IRQ lines + * IRQ should be connected to nios2 IRQ0. + * + * Reference: "Embedded Peripherals IP User Guide + * for Intel® Quartus® Prime Design Suite: 21.4" + * Chapter 38 "Vectored Interrupt Controller Core" + * See: https://www.intel.com/content/www/us/en/docs/programmable/683130/21-4/vectored-interrupt-controller-core.html + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" + +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "hw/sysbus.h" +#include "migration/vmstate.h" +#include "qapi/error.h" +#include "qemu/bitops.h" +#include "qemu/log.h" +#include "qom/object.h" +#include "hw/intc/nios2_vic.h" +#include "cpu.h" + + +enum { + INT_CONFIG0 = 0, + INT_CONFIG31 = 31, + INT_ENABLE = 32, + INT_ENABLE_SET = 33, + INT_ENABLE_CLR = 34, + INT_PENDING = 35, + INT_RAW_STATUS = 36, + SW_INTERRUPT = 37, + SW_INTERRUPT_SET = 38, + SW_INTERRUPT_CLR = 39, + VIC_CONFIG = 40, + VIC_STATUS = 41, + VEC_TBL_BASE = 42, + VEC_TBL_ADDR = 43, + CSR_COUNT /* Last! 
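+ (CSR_COUNT is not a real register: it is only used to size the
+ CSR MMIO region, which nios2_vic_realize() creates as
+ CSR_COUNT * sizeof(uint32_t) bytes of 32-bit registers.)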
*/ +}; + +/* Requested interrupt level (INT_CONFIG[0:5]) */ +static inline uint32_t vic_int_config_ril(const Nios2VIC *vic, int irq_num) +{ + return extract32(vic->int_config[irq_num], 0, 6); +} + +/* Requested NMI (INT_CONFIG[6]) */ +static inline uint32_t vic_int_config_rnmi(const Nios2VIC *vic, int irq_num) +{ + return extract32(vic->int_config[irq_num], 6, 1); +} + +/* Requested register set (INT_CONFIG[7:12]) */ +static inline uint32_t vic_int_config_rrs(const Nios2VIC *vic, int irq_num) +{ + return extract32(vic->int_config[irq_num], 7, 6); +} + +static inline uint32_t vic_config_vec_size(const Nios2VIC *vic) +{ + return 1 << (2 + extract32(vic->vic_config, 0, 3)); +} + +static inline uint32_t vic_int_pending(const Nios2VIC *vic) +{ + return (vic->int_raw_status | vic->sw_int) & vic->int_enable; +} + +static void vic_update_irq(Nios2VIC *vic) +{ + Nios2CPU *cpu = NIOS2_CPU(vic->cpu); + uint32_t pending = vic_int_pending(vic); + int irq = -1; + int max_ril = 0; + /* Note that if RIL is 0 for an interrupt it is effectively disabled */ + + vic->vec_tbl_addr = 0; + vic->vic_status = 0; + + if (pending == 0) { + qemu_irq_lower(vic->output_int); + return; + } + + for (int i = 0; i < NIOS2_VIC_MAX_IRQ; i++) { + if (pending & BIT(i)) { + int ril = vic_int_config_ril(vic, i); + if (ril > max_ril) { + irq = i; + max_ril = ril; + } + } + } + + if (irq < 0) { + qemu_irq_lower(vic->output_int); + return; + } + + vic->vec_tbl_addr = irq * vic_config_vec_size(vic) + vic->vec_tbl_base; + vic->vic_status = irq | BIT(31); + + /* + * In hardware, the interface between the VIC and the CPU is via the + * External Interrupt Controller interface, where the interrupt controller + * presents the CPU with a packet of data containing: + * - Requested Handler Address (RHA): 32 bits + * - Requested Register Set (RRS) : 6 bits + * - Requested Interrupt Level (RIL) : 6 bits + * - Requested NMI flag (RNMI) : 1 bit + * In our emulation, we implement this by writing the data directly to + * fields in the CPU object and then raising the IRQ line to tell + * the CPU that we've done so. + */ + + cpu->rha = vic->vec_tbl_addr; + cpu->ril = max_ril; + cpu->rrs = vic_int_config_rrs(vic, irq); + cpu->rnmi = vic_int_config_rnmi(vic, irq); + + qemu_irq_raise(vic->output_int); +} + +static void vic_set_irq(void *opaque, int irq_num, int level) +{ + Nios2VIC *vic = opaque; + + vic->int_raw_status = deposit32(vic->int_raw_status, irq_num, 1, !!level); + vic_update_irq(vic); +} + +static void nios2_vic_reset(DeviceState *dev) +{ + Nios2VIC *vic = NIOS2_VIC(dev); + + memset(&vic->int_config, 0, sizeof(vic->int_config)); + vic->vic_config = 0; + vic->int_raw_status = 0; + vic->int_enable = 0; + vic->sw_int = 0; + vic->vic_status = 0; + vic->vec_tbl_base = 0; + vic->vec_tbl_addr = 0; +} + +static uint64_t nios2_vic_csr_read(void *opaque, hwaddr offset, unsigned size) +{ + Nios2VIC *vic = opaque; + int index = offset / 4; + + switch (index) { + case INT_CONFIG0 ... 
INT_CONFIG31: + return vic->int_config[index - INT_CONFIG0]; + case INT_ENABLE: + return vic->int_enable; + case INT_PENDING: + return vic_int_pending(vic); + case INT_RAW_STATUS: + return vic->int_raw_status; + case SW_INTERRUPT: + return vic->sw_int; + case VIC_CONFIG: + return vic->vic_config; + case VIC_STATUS: + return vic->vic_status; + case VEC_TBL_BASE: + return vic->vec_tbl_base; + case VEC_TBL_ADDR: + return vic->vec_tbl_addr; + default: + return 0; + } +} + +static void nios2_vic_csr_write(void *opaque, hwaddr offset, uint64_t value, + unsigned size) +{ + Nios2VIC *vic = opaque; + int index = offset / 4; + + switch (index) { + case INT_CONFIG0 ... INT_CONFIG31: + vic->int_config[index - INT_CONFIG0] = value; + break; + case INT_ENABLE: + vic->int_enable = value; + break; + case INT_ENABLE_SET: + vic->int_enable |= value; + break; + case INT_ENABLE_CLR: + vic->int_enable &= ~value; + break; + case SW_INTERRUPT: + vic->sw_int = value; + break; + case SW_INTERRUPT_SET: + vic->sw_int |= value; + break; + case SW_INTERRUPT_CLR: + vic->sw_int &= ~value; + break; + case VIC_CONFIG: + vic->vic_config = value; + break; + case VEC_TBL_BASE: + vic->vec_tbl_base = value; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "nios2-vic: write to invalid CSR address %#" + HWADDR_PRIx "\n", offset); + } + + vic_update_irq(vic); +} + +static const MemoryRegionOps nios2_vic_csr_ops = { + .read = nios2_vic_csr_read, + .write = nios2_vic_csr_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { .min_access_size = 4, .max_access_size = 4 } +}; + +static void nios2_vic_realize(DeviceState *dev, Error **errp) +{ + Nios2VIC *vic = NIOS2_VIC(dev); + + if (!vic->cpu) { + /* This is a programming error in the code using this device */ + error_setg(errp, "nios2-vic 'cpu' link property was not set"); + return; + } + + sysbus_init_irq(SYS_BUS_DEVICE(dev), &vic->output_int); + qdev_init_gpio_in(dev, vic_set_irq, NIOS2_VIC_MAX_IRQ); + + memory_region_init_io(&vic->csr, OBJECT(dev), &nios2_vic_csr_ops, vic, + "nios2.vic.csr", CSR_COUNT * sizeof(uint32_t)); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &vic->csr); +} + +static Property nios2_vic_properties[] = { + DEFINE_PROP_LINK("cpu", Nios2VIC, cpu, TYPE_CPU, CPUState *), + DEFINE_PROP_END_OF_LIST() +}; + +static const VMStateDescription nios2_vic_vmstate = { + .name = "nios2-vic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]){ + VMSTATE_UINT32_ARRAY(int_config, Nios2VIC, 32), + VMSTATE_UINT32(vic_config, Nios2VIC), + VMSTATE_UINT32(int_raw_status, Nios2VIC), + VMSTATE_UINT32(int_enable, Nios2VIC), + VMSTATE_UINT32(sw_int, Nios2VIC), + VMSTATE_UINT32(vic_status, Nios2VIC), + VMSTATE_UINT32(vec_tbl_base, Nios2VIC), + VMSTATE_UINT32(vec_tbl_addr, Nios2VIC), + VMSTATE_END_OF_LIST() + }, +}; + +static void nios2_vic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = nios2_vic_reset; + dc->realize = nios2_vic_realize; + dc->vmsd = &nios2_vic_vmstate; + device_class_set_props(dc, nios2_vic_properties); +} + +static const TypeInfo nios2_vic_info = { + .name = TYPE_NIOS2_VIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(Nios2VIC), + .class_init = nios2_vic_class_init, +}; + +static void nios2_vic_register_types(void) +{ + type_register_static(&nios2_vic_info); +} + +type_init(nios2_vic_register_types); diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c index e43b050e92..0412edc982 100644 --- a/hw/intc/riscv_aclint.c +++ b/hw/intc/riscv_aclint.c @@ -38,12 +38,18 @@ 
typedef struct riscv_aclint_mtimer_callback { int num; } riscv_aclint_mtimer_callback; -static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq) +static uint64_t cpu_riscv_read_rtc_raw(uint32_t timebase_freq) { return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), timebase_freq, NANOSECONDS_PER_SECOND); } +static uint64_t cpu_riscv_read_rtc(void *opaque) +{ + RISCVAclintMTimerState *mtimer = opaque; + return cpu_riscv_read_rtc_raw(mtimer->timebase_freq) + mtimer->time_delta; +} + /* * Called when timecmp is written to update the QEMU timer or immediately * trigger timer interrupt if mtimecmp <= current timer value. @@ -51,13 +57,13 @@ static uint64_t cpu_riscv_read_rtc(uint32_t timebase_freq) static void riscv_aclint_mtimer_write_timecmp(RISCVAclintMTimerState *mtimer, RISCVCPU *cpu, int hartid, - uint64_t value, - uint32_t timebase_freq) + uint64_t value) { + uint32_t timebase_freq = mtimer->timebase_freq; uint64_t next; uint64_t diff; - uint64_t rtc_r = cpu_riscv_read_rtc(timebase_freq); + uint64_t rtc_r = cpu_riscv_read_rtc(mtimer); cpu->env.timecmp = value; if (cpu->env.timecmp <= rtc_r) { @@ -126,9 +132,9 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr, qemu_log_mask(LOG_GUEST_ERROR, "aclint-mtimer: invalid hartid: %zu", hartid); } else if ((addr & 0x7) == 0) { - /* timecmp_lo */ + /* timecmp_lo for RV32/RV64 or timecmp for RV64 */ uint64_t timecmp = env->timecmp; - return timecmp & 0xFFFFFFFF; + return (size == 4) ? (timecmp & 0xFFFFFFFF) : timecmp; } else if ((addr & 0x7) == 4) { /* timecmp_hi */ uint64_t timecmp = env->timecmp; @@ -139,11 +145,12 @@ static uint64_t riscv_aclint_mtimer_read(void *opaque, hwaddr addr, return 0; } } else if (addr == mtimer->time_base) { - /* time_lo */ - return cpu_riscv_read_rtc(mtimer->timebase_freq) & 0xFFFFFFFF; + /* time_lo for RV32/RV64 or time for RV64 */ + uint64_t rtc = cpu_riscv_read_rtc(mtimer); + return (size == 4) ?
(rtc & 0xFFFFFFFF) : rtc; } else if (addr == mtimer->time_base + 4) { /* time_hi */ - return (cpu_riscv_read_rtc(mtimer->timebase_freq) >> 32) & 0xFFFFFFFF; + return (cpu_riscv_read_rtc(mtimer) >> 32) & 0xFFFFFFFF; } qemu_log_mask(LOG_UNIMP, @@ -156,6 +163,7 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr, uint64_t value, unsigned size) { RISCVAclintMTimerState *mtimer = opaque; + int i; if (addr >= mtimer->timecmp_base && addr < (mtimer->timecmp_base + (mtimer->num_harts << 3))) { @@ -167,33 +175,66 @@ static void riscv_aclint_mtimer_write(void *opaque, hwaddr addr, qemu_log_mask(LOG_GUEST_ERROR, "aclint-mtimer: invalid hartid: %zu", hartid); } else if ((addr & 0x7) == 0) { - /* timecmp_lo */ - uint64_t timecmp_hi = env->timecmp >> 32; - riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, - timecmp_hi << 32 | (value & 0xFFFFFFFF), - mtimer->timebase_freq); - return; + if (size == 4) { + /* timecmp_lo for RV32/RV64 */ + uint64_t timecmp_hi = env->timecmp >> 32; + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, + timecmp_hi << 32 | (value & 0xFFFFFFFF)); + } else { + /* timecmp for RV64 */ + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, + value); + } } else if ((addr & 0x7) == 4) { - /* timecmp_hi */ - uint64_t timecmp_lo = env->timecmp; - riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, - value << 32 | (timecmp_lo & 0xFFFFFFFF), - mtimer->timebase_freq); + if (size == 4) { + /* timecmp_hi for RV32/RV64 */ + uint64_t timecmp_lo = env->timecmp; + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), hartid, + value << 32 | (timecmp_lo & 0xFFFFFFFF)); + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-mtimer: invalid timecmp_hi write: %08x", + (uint32_t)addr); + } } else { qemu_log_mask(LOG_UNIMP, "aclint-mtimer: invalid timecmp write: %08x", (uint32_t)addr); } return; - } else if (addr == mtimer->time_base) { - /* time_lo */ - qemu_log_mask(LOG_UNIMP, - "aclint-mtimer: time_lo write not implemented"); - return; - } else if (addr == mtimer->time_base + 4) { - /* time_hi */ - qemu_log_mask(LOG_UNIMP, - "aclint-mtimer: time_hi write not implemented"); + } else if (addr == mtimer->time_base || addr == mtimer->time_base + 4) { + uint64_t rtc_r = cpu_riscv_read_rtc_raw(mtimer->timebase_freq); + + if (addr == mtimer->time_base) { + if (size == 4) { + /* time_lo for RV32/RV64 */ + mtimer->time_delta = ((rtc_r & ~0xFFFFFFFFULL) | value) - rtc_r; + } else { + /* time for RV64 */ + mtimer->time_delta = value - rtc_r; + } + } else { + if (size == 4) { + /* time_hi for RV32/RV64 */ + mtimer->time_delta = (value << 32 | (rtc_r & 0xFFFFFFFF)) - rtc_r; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "aclint-mtimer: invalid time_hi write: %08x", + (uint32_t)addr); + return; + } + } + + /* Check if timer interrupt is triggered for each hart. */ + for (i = 0; i < mtimer->num_harts; i++) { + CPUState *cpu = qemu_get_cpu(mtimer->hartid_base + i); + CPURISCVState *env = cpu ? 
cpu->env_ptr : NULL; + if (!env) { + continue; + } + riscv_aclint_mtimer_write_timecmp(mtimer, RISCV_CPU(cpu), + i, env->timecmp); + } return; } @@ -208,6 +249,10 @@ static const MemoryRegionOps riscv_aclint_mtimer_ops = { .valid = { .min_access_size = 4, .max_access_size = 8 + }, + .impl = { + .min_access_size = 4, + .max_access_size = 8, } }; @@ -248,11 +293,29 @@ static void riscv_aclint_mtimer_realize(DeviceState *dev, Error **errp) } } +static void riscv_aclint_mtimer_reset_enter(Object *obj, ResetType type) +{ + /* + * According to RISC-V ACLINT spec: + * - On MTIMER device reset, the MTIME register is cleared to zero. + * - On MTIMER device reset, the MTIMECMP registers are in unknown state. + */ + RISCVAclintMTimerState *mtimer = RISCV_ACLINT_MTIMER(obj); + + /* + * Clear the mtime register by writing 0 to it. + * Pending mtime interrupts will also be cleared at the same time. + */ + riscv_aclint_mtimer_write(mtimer, mtimer->time_base, 0, 8); +} + static void riscv_aclint_mtimer_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = riscv_aclint_mtimer_realize; device_class_set_props(dc, riscv_aclint_mtimer_properties); + ResettableClass *rc = RESETTABLE_CLASS(klass); + rc->phases.enter = riscv_aclint_mtimer_reset_enter; } static const TypeInfo riscv_aclint_mtimer_info = { @@ -299,7 +362,7 @@ DeviceState *riscv_aclint_mtimer_create(hwaddr addr, hwaddr size, continue; } if (provide_rdtime) { - riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, timebase_freq); + riscv_cpu_set_rdtime_fn(env, cpu_riscv_read_rtc, dev); } cb->s = RISCV_ACLINT_MTIMER(dev); @@ -407,11 +470,32 @@ static void riscv_aclint_swi_realize(DeviceState *dev, Error **errp) } } +static void riscv_aclint_swi_reset_enter(Object *obj, ResetType type) +{ + /* + * According to RISC-V ACLINT spec: + * - On MSWI device reset, each MSIP register is cleared to zero. + * + * p.s. SSWI device reset does nothing since SETSIP register always reads 0. + */ + RISCVAclintSwiState *swi = RISCV_ACLINT_SWI(obj); + int i; + + if (!swi->sswi) { + for (i = 0; i < swi->num_harts; i++) { + /* Clear MSIP registers by lowering software interrupts.
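+ Each MSIP bit is modelled as a qemu_irq line per hart, so
+ reset amounts to lowering that line; there is no separate
+ register state here that needs zeroing.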
*/ + qemu_irq_lower(swi->soft_irqs[i]); + } + } +} + static void riscv_aclint_swi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = riscv_aclint_swi_realize; device_class_set_props(dc, riscv_aclint_swi_properties); + ResettableClass *rc = RESETTABLE_CLASS(klass); + rc->phases.enter = riscv_aclint_swi_reset_enter; } static const TypeInfo riscv_aclint_swi_info = { diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 53414aa197..5271590304 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -151,8 +151,9 @@ gicv3_icv_hppir_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_HPPIR%d rea gicv3_icv_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICV_DIR write cpu 0x%x value 0x%" PRIx64 gicv3_icv_iar_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IAR%d read cpu 0x%x value 0x%" PRIx64 gicv3_icv_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_EOIR%d write cpu 0x%x value 0x%" PRIx64 -gicv3_cpuif_virt_update(uint32_t cpuid, int idx) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d" -gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel, int maintlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d maintenance-irq %d" +gicv3_cpuif_virt_update(uint32_t cpuid, int idx, int hppvlpi, int grp, int prio) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d HPPVLPI %d grp %d prio %d" +gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d" +gicv3_cpuif_virt_set_maint_irq(uint32_t cpuid, int maintlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting maintenance-irq %d" # arm_gicv3_dist.c gicv3_dist_read(uint64_t offset, uint64_t data, unsigned size, bool secure) "GICv3 distributor read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u secure %d" @@ -184,9 +185,17 @@ gicv3_its_cmd_mapd(uint32_t devid, uint32_t size, uint64_t ittaddr, int valid) " gicv3_its_cmd_mapc(uint32_t icid, uint64_t rdbase, int valid) "GICv3 ITS: command MAPC ICID 0x%x RDbase 0x%" PRIx64 " V %d" gicv3_its_cmd_mapi(uint32_t devid, uint32_t eventid, uint32_t icid) "GICv3 ITS: command MAPI DeviceID 0x%x EventID 0x%x ICID 0x%x" gicv3_its_cmd_mapti(uint32_t devid, uint32_t eventid, uint32_t icid, uint32_t intid) "GICv3 ITS: command MAPTI DeviceID 0x%x EventID 0x%x ICID 0x%x pINTID 0x%x" -gicv3_its_cmd_inv(void) "GICv3 ITS: command INV or INVALL" +gicv3_its_cmd_inv(uint32_t devid, uint32_t eventid) "GICv3 ITS: command INV DeviceID 0x%x EventID 0x%x" +gicv3_its_cmd_invall(void) "GICv3 ITS: command INVALL" gicv3_its_cmd_movall(uint64_t rd1, uint64_t rd2) "GICv3 ITS: command MOVALL RDbase1 0x%" PRIx64 " RDbase2 0x%" PRIx64 gicv3_its_cmd_movi(uint32_t devid, uint32_t eventid, uint32_t icid) "GICv3 ITS: command MOVI DeviceID 0x%x EventID 0x%x ICID 0x%x" +gicv3_its_cmd_vmapi(uint32_t devid, uint32_t eventid, uint32_t vpeid, uint32_t doorbell) "GICv3 ITS: command VMAPI DeviceID 0x%x EventID 0x%x vPEID 0x%x Dbell_pINTID 0x%x" +gicv3_its_cmd_vmapti(uint32_t devid, uint32_t eventid, uint32_t vpeid, uint32_t vintid, uint32_t doorbell) "GICv3 ITS: command VMAPTI DeviceID 0x%x EventID 0x%x vPEID 0x%x vINTID 0x%x Dbell_pINTID 0x%x" +gicv3_its_cmd_vmapp(uint32_t vpeid, uint64_t rdbase, int valid, uint64_t vptaddr, uint32_t vptsize) "GICv3 ITS: command VMAPP vPEID 0x%x RDbase 0x%" PRIx64 " V %d VPT_addr 0x%" PRIx64 " VPT_size 0x%x" +gicv3_its_cmd_vmovp(uint32_t vpeid, uint64_t rdbase) "GICv3 ITS: command VMOVP vPEID 0x%x RDbase 0x%" PRIx64 +gicv3_its_cmd_vsync(void) "GICv3 ITS:
command VSYNC" +gicv3_its_cmd_vmovi(uint32_t devid, uint32_t eventid, uint32_t vpeid, int dbvalid, uint32_t doorbell) "GICv3 ITS: command VMOVI DeviceID 0x%x EventID 0x%x vPEID 0x%x D %d Dbell_pINTID 0x%x" +gicv3_its_cmd_vinvall(uint32_t vpeid) "GICv3 ITS: command VINVALL vPEID 0x%x" gicv3_its_cmd_unknown(unsigned cmd) "GICv3 ITS: unknown command 0x%x" gicv3_its_cte_read(uint32_t icid, int valid, uint32_t rdbase) "GICv3 ITS: Collection Table read for ICID 0x%x: valid %d RDBase 0x%x" gicv3_its_cte_write(uint32_t icid, int valid, uint32_t rdbase) "GICv3 ITS: Collection Table write for ICID 0x%x: valid %d RDBase 0x%x" @@ -197,6 +206,9 @@ gicv3_its_ite_write(uint64_t ittaddr, uint32_t eventid, int valid, int inttype, gicv3_its_dte_read(uint32_t devid, int valid, uint32_t size, uint64_t ittaddr) "GICv3 ITS: Device Table read for DeviceID 0x%x: valid %d size 0x%x ITTaddr 0x%" PRIx64 gicv3_its_dte_write(uint32_t devid, int valid, uint32_t size, uint64_t ittaddr) "GICv3 ITS: Device Table write for DeviceID 0x%x: valid %d size 0x%x ITTaddr 0x%" PRIx64 gicv3_its_dte_read_fault(uint32_t devid) "GICv3 ITS: Device Table read for DeviceID 0x%x: faulted" +gicv3_its_vte_read(uint32_t vpeid, int valid, uint32_t vptsize, uint64_t vptaddr, uint32_t rdbase) "GICv3 ITS: vPE Table read for vPEID 0x%x: valid %d VPTsize 0x%x VPTaddr 0x%" PRIx64 " RDbase 0x%x" +gicv3_its_vte_read_fault(uint32_t vpeid) "GICv3 ITS: vPE Table read for vPEID 0x%x: faulted" +gicv3_its_vte_write(uint32_t vpeid, int valid, uint32_t vptsize, uint64_t vptaddr, uint32_t rdbase) "GICv3 ITS: vPE Table write for vPEID 0x%x: valid %d VPTsize 0x%x VPTaddr 0x%" PRIx64 " RDbase 0x%x" # armv7m_nvic.c nvic_recompute_state(int vectpending, int vectpending_prio, int exception_prio) "NVIC state recomputed: vectpending %d vectpending_prio %d exception_prio %d" diff --git a/hw/nios2/10m50_devboard.c b/hw/nios2/10m50_devboard.c index 3d1205b8bd..91383fb097 100644 --- a/hw/nios2/10m50_devboard.c +++ b/hw/nios2/10m50_devboard.c @@ -27,6 +27,7 @@ #include "hw/sysbus.h" #include "hw/char/serial.h" +#include "hw/intc/nios2_vic.h" #include "hw/qdev-properties.h" #include "sysemu/sysemu.h" #include "hw/boards.h" @@ -36,17 +37,28 @@ #include "boot.h" +struct Nios2MachineState { + MachineState parent_obj; + + MemoryRegion phys_tcm; + MemoryRegion phys_tcm_alias; + MemoryRegion phys_ram; + MemoryRegion phys_ram_alias; + + bool vic; +}; + +#define TYPE_NIOS2_MACHINE MACHINE_TYPE_NAME("10m50-ghrd") +OBJECT_DECLARE_TYPE(Nios2MachineState, MachineClass, NIOS2_MACHINE) + #define BINARY_DEVICE_TREE_FILE "10m50-devboard.dtb" static void nios2_10m50_ghrd_init(MachineState *machine) { + Nios2MachineState *nms = NIOS2_MACHINE(machine); Nios2CPU *cpu; DeviceState *dev; MemoryRegion *address_space_mem = get_system_memory(); - MemoryRegion *phys_tcm = g_new(MemoryRegion, 1); - MemoryRegion *phys_tcm_alias = g_new(MemoryRegion, 1); - MemoryRegion *phys_ram = g_new(MemoryRegion, 1); - MemoryRegion *phys_ram_alias = g_new(MemoryRegion, 1); ram_addr_t tcm_base = 0x0; ram_addr_t tcm_size = 0x1000; /* 1 kiB, but QEMU limit is 4 kiB */ ram_addr_t ram_base = 0x08000000; @@ -55,27 +67,56 @@ static void nios2_10m50_ghrd_init(MachineState *machine) int i; /* Physical TCM (tb_ram_1k) with alias at 0xc0000000 */ - memory_region_init_ram(phys_tcm, NULL, "nios2.tcm", tcm_size, + memory_region_init_ram(&nms->phys_tcm, NULL, "nios2.tcm", tcm_size, &error_abort); - memory_region_init_alias(phys_tcm_alias, NULL, "nios2.tcm.alias", - phys_tcm, 0, tcm_size); - 
memory_region_add_subregion(address_space_mem, tcm_base, phys_tcm); + memory_region_init_alias(&nms->phys_tcm_alias, NULL, "nios2.tcm.alias", + &nms->phys_tcm, 0, tcm_size); + memory_region_add_subregion(address_space_mem, tcm_base, &nms->phys_tcm); memory_region_add_subregion(address_space_mem, 0xc0000000 + tcm_base, - phys_tcm_alias); + &nms->phys_tcm_alias); /* Physical DRAM with alias at 0xc0000000 */ - memory_region_init_ram(phys_ram, NULL, "nios2.ram", ram_size, + memory_region_init_ram(&nms->phys_ram, NULL, "nios2.ram", ram_size, &error_abort); - memory_region_init_alias(phys_ram_alias, NULL, "nios2.ram.alias", - phys_ram, 0, ram_size); - memory_region_add_subregion(address_space_mem, ram_base, phys_ram); + memory_region_init_alias(&nms->phys_ram_alias, NULL, "nios2.ram.alias", + &nms->phys_ram, 0, ram_size); + memory_region_add_subregion(address_space_mem, ram_base, &nms->phys_ram); memory_region_add_subregion(address_space_mem, 0xc0000000 + ram_base, - phys_ram_alias); + &nms->phys_ram_alias); - /* Create CPU -- FIXME */ - cpu = NIOS2_CPU(cpu_create(TYPE_NIOS2_CPU)); - for (i = 0; i < 32; i++) { - irq[i] = qdev_get_gpio_in_named(DEVICE(cpu), "IRQ", i); + /* Create CPU. We need to set eic_present between init and realize. */ + cpu = NIOS2_CPU(object_new(TYPE_NIOS2_CPU)); + + /* Enable the External Interrupt Controller within the CPU. */ + cpu->eic_present = nms->vic; + + /* Configure new exception vectors. */ + cpu->reset_addr = 0xd4000000; + cpu->exception_addr = 0xc8000120; + cpu->fast_tlb_miss_addr = 0xc0000100; + + qdev_realize_and_unref(DEVICE(cpu), NULL, &error_fatal); + + if (nms->vic) { + DeviceState *dev = qdev_new(TYPE_NIOS2_VIC); + MemoryRegion *dev_mr; + qemu_irq cpu_irq; + + object_property_set_link(OBJECT(dev), "cpu", OBJECT(cpu), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + cpu_irq = qdev_get_gpio_in_named(DEVICE(cpu), "EIC", 0); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, cpu_irq); + for (int i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in(dev, i); + } + + dev_mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(address_space_mem, 0x18002000, dev_mr); + } else { + for (i = 0; i < 32; i++) { + irq[i] = qdev_get_gpio_in_named(DEVICE(cpu), "IRQ", i); + } } /* Register: Altera 16550 UART */ @@ -96,20 +137,44 @@ static void nios2_10m50_ghrd_init(MachineState *machine) sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, 0xe0000880); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[5]); - /* Configure new exception vectors and reset CPU for it to take effect. 
*/ - cpu->reset_addr = 0xd4000000; - cpu->exception_addr = 0xc8000120; - cpu->fast_tlb_miss_addr = 0xc0000100; - nios2_load_kernel(cpu, ram_base, ram_size, machine->initrd_filename, BINARY_DEVICE_TREE_FILE, NULL); } -static void nios2_10m50_ghrd_machine_init(struct MachineClass *mc) +static bool get_vic(Object *obj, Error **errp) +{ + Nios2MachineState *nms = NIOS2_MACHINE(obj); + return nms->vic; +} + +static void set_vic(Object *obj, bool value, Error **errp) +{ + Nios2MachineState *nms = NIOS2_MACHINE(obj); + nms->vic = value; +} + +static void nios2_10m50_ghrd_class_init(ObjectClass *oc, void *data) { + MachineClass *mc = MACHINE_CLASS(oc); + mc->desc = "Altera 10M50 GHRD Nios II design"; mc->init = nios2_10m50_ghrd_init; mc->is_default = true; + + object_class_property_add_bool(oc, "vic", get_vic, set_vic); + object_class_property_set_description(oc, "vic", + "Set on/off to enable/disable the Vectored Interrupt Controller"); } -DEFINE_MACHINE("10m50-ghrd", nios2_10m50_ghrd_machine_init); +static const TypeInfo nios2_10m50_ghrd_type_info = { + .name = TYPE_NIOS2_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(Nios2MachineState), + .class_init = nios2_10m50_ghrd_class_init, +}; + +static void nios2_10m50_ghrd_type_init(void) +{ + type_register_static(&nios2_10m50_ghrd_type_info); +} +type_init(nios2_10m50_ghrd_type_init); diff --git a/hw/nios2/Kconfig b/hw/nios2/Kconfig index b10ea640da..4748ae27b6 100644 --- a/hw/nios2/Kconfig +++ b/hw/nios2/Kconfig @@ -3,6 +3,7 @@ config NIOS2_10M50 select NIOS2 select SERIAL select ALTERA_TIMER + select NIOS2_VIC config NIOS2_GENERIC_NOMMU bool diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c index 27a7622432..58db0b8e3b 100644 --- a/hw/rdma/vmw/pvrdma_main.c +++ b/hw/rdma/vmw/pvrdma_main.c @@ -159,13 +159,13 @@ static void free_dsr(PVRDMADev *dev) free_dev_ring(pci_dev, &dev->dsr_info.cq, dev->dsr_info.cq_ring_state); rdma_pci_dma_unmap(pci_dev, dev->dsr_info.req, - sizeof(union pvrdma_cmd_req)); + sizeof(union pvrdma_cmd_req)); rdma_pci_dma_unmap(pci_dev, dev->dsr_info.rsp, - sizeof(union pvrdma_cmd_resp)); + sizeof(union pvrdma_cmd_resp)); rdma_pci_dma_unmap(pci_dev, dev->dsr_info.dsr, - sizeof(struct pvrdma_device_shared_region)); + sizeof(struct pvrdma_device_shared_region)); dev->dsr_info.dsr = NULL; } @@ -249,7 +249,8 @@ static void init_dsr_dev_caps(PVRDMADev *dev) { struct pvrdma_device_shared_region *dsr; - if (dev->dsr_info.dsr == NULL) { + if (!dev->dsr_info.dsr) { + /* Buggy or malicious guest driver */ rdma_error_report("Can't initialized DSR"); return; } diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c index 0f179d3601..57a41df8e9 100644 --- a/hw/riscv/boot.c +++ b/hw/riscv/boot.c @@ -212,9 +212,9 @@ hwaddr riscv_load_initrd(const char *filename, uint64_t mem_size, return *start + size; } -uint32_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt) +uint64_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt) { - uint32_t temp, fdt_addr; + uint64_t temp, fdt_addr; hwaddr dram_end = dram_base + mem_size; int ret, fdtsize = fdt_totalsize(fdt); @@ -229,7 +229,7 @@ uint32_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt) * Thus, put it at an 16MB aligned address that less than fdt size from the * end of dram or 3GB whichever is lesser. */ - temp = MIN(dram_end, 3072 * MiB); + temp = (dram_base < 3072 * MiB) ? 
     fdt_addr = QEMU_ALIGN_DOWN(temp - fdtsize, 16 * MiB);
 
     ret = fdt_pack(fdt);
@@ -285,13 +285,15 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts
                                hwaddr start_addr,
                                hwaddr rom_base, hwaddr rom_size,
                                uint64_t kernel_entry,
-                               uint32_t fdt_load_addr, void *fdt)
+                               uint64_t fdt_load_addr, void *fdt)
 {
     int i;
     uint32_t start_addr_hi32 = 0x00000000;
+    uint32_t fdt_load_addr_hi32 = 0x00000000;
 
     if (!riscv_is_32bit(harts)) {
         start_addr_hi32 = start_addr >> 32;
+        fdt_load_addr_hi32 = fdt_load_addr >> 32;
     }
     /* reset vector */
     uint32_t reset_vec[10] = {
@@ -304,7 +306,7 @@ void riscv_setup_rom_reset_vec(MachineState *machine, RISCVHartArrayState *harts
         start_addr,                  /* start: .dword */
         start_addr_hi32,
         fdt_load_addr,               /* fdt_laddr: .dword */
-        0x00000000,
+        fdt_load_addr_hi32,
                                      /* fw_dyn: */
     };
     if (riscv_is_32bit(harts)) {
diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c
index 833624d66c..2d401dcb23 100644
--- a/hw/riscv/opentitan.c
+++ b/hw/riscv/opentitan.c
@@ -120,11 +120,18 @@ static void lowrisc_ibex_soc_init(Object *obj)
     object_initialize_child(obj, "uart", &s->uart, TYPE_IBEX_UART);
 
     object_initialize_child(obj, "timer", &s->timer, TYPE_IBEX_TIMER);
+
+    for (int i = 0; i < OPENTITAN_NUM_SPI_HOSTS; i++) {
+        object_initialize_child(obj, "spi_host[*]", &s->spi_host[i],
+                                TYPE_IBEX_SPI_HOST);
+    }
 }
 
 static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp)
 {
     const MemMapEntry *memmap = ibex_memmap;
+    DeviceState *dev;
+    SysBusDevice *busdev;
     MachineState *ms = MACHINE(qdev_get_machine());
     LowRISCIbexSoCState *s = RISCV_IBEX_SOC(dev_soc);
     MemoryRegion *sys_mem = get_system_memory();
@@ -209,14 +216,35 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp)
                        qdev_get_gpio_in(DEVICE(qemu_get_cpu(0)), IRQ_M_TIMER));
 
+    /* SPI-Hosts */
+    for (int i = 0; i < OPENTITAN_NUM_SPI_HOSTS; ++i) {
+        dev = DEVICE(&(s->spi_host[i]));
+        if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi_host[i]), errp)) {
+            return;
+        }
+        busdev = SYS_BUS_DEVICE(dev);
+        sysbus_mmio_map(busdev, 0, memmap[IBEX_DEV_SPI_HOST0 + i].base);
+
+        switch (i) {
+        case OPENTITAN_SPI_HOST0:
+            sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(&s->plic),
+                               IBEX_SPI_HOST0_ERR_IRQ));
+            sysbus_connect_irq(busdev, 1, qdev_get_gpio_in(DEVICE(&s->plic),
+                               IBEX_SPI_HOST0_SPI_EVENT_IRQ));
+            break;
+        case OPENTITAN_SPI_HOST1:
+            sysbus_connect_irq(busdev, 0, qdev_get_gpio_in(DEVICE(&s->plic),
+                               IBEX_SPI_HOST1_ERR_IRQ));
+            sysbus_connect_irq(busdev, 1, qdev_get_gpio_in(DEVICE(&s->plic),
+                               IBEX_SPI_HOST1_SPI_EVENT_IRQ));
+            break;
+        }
+    }
+
     create_unimplemented_device("riscv.lowrisc.ibex.gpio",
         memmap[IBEX_DEV_GPIO].base, memmap[IBEX_DEV_GPIO].size);
     create_unimplemented_device("riscv.lowrisc.ibex.spi_device",
         memmap[IBEX_DEV_SPI_DEVICE].base, memmap[IBEX_DEV_SPI_DEVICE].size);
-    create_unimplemented_device("riscv.lowrisc.ibex.spi_host0",
-        memmap[IBEX_DEV_SPI_HOST0].base, memmap[IBEX_DEV_SPI_HOST0].size);
-    create_unimplemented_device("riscv.lowrisc.ibex.spi_host1",
-        memmap[IBEX_DEV_SPI_HOST1].base, memmap[IBEX_DEV_SPI_HOST1].size);
     create_unimplemented_device("riscv.lowrisc.ibex.i2c",
         memmap[IBEX_DEV_I2C].base, memmap[IBEX_DEV_I2C].size);
     create_unimplemented_device("riscv.lowrisc.ibex.pattgen",
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index da50cbed43..b49c5361bd 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -230,8 +230,14 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket,
         cpu_name = g_strdup_printf("/cpus/cpu@%d",
                                   s->soc[socket].hartid_base + cpu);
         qemu_fdt_add_subnode(mc->fdt, cpu_name);
-        qemu_fdt_setprop_string(mc->fdt, cpu_name, "mmu-type",
-                                (is_32_bit) ? "riscv,sv32" : "riscv,sv48");
+        if (riscv_feature(&s->soc[socket].harts[cpu].env,
+                          RISCV_FEATURE_MMU)) {
+            qemu_fdt_setprop_string(mc->fdt, cpu_name, "mmu-type",
+                                    (is_32_bit) ? "riscv,sv32" : "riscv,sv48");
+        } else {
+            qemu_fdt_setprop_string(mc->fdt, cpu_name, "mmu-type",
+                                    "riscv,none");
+        }
         name = riscv_isa_string(&s->soc[socket].harts[cpu]);
         qemu_fdt_setprop_string(mc->fdt, cpu_name, "riscv,isa", name);
         g_free(name);
@@ -1308,12 +1314,18 @@ static void virt_machine_init(MachineState *machine)
 
     /*
      * Only direct kernel boot is currently supported for KVM VMs,
-     * so the "-bios" parameter is ignored and treated like "-bios none"
-     * when KVM is enabled.
+     * so the "-bios" parameter is not supported when KVM is enabled.
      */
     if (kvm_enabled()) {
-        g_free(machine->firmware);
-        machine->firmware = g_strdup("none");
+        if (machine->firmware) {
+            if (strcmp(machine->firmware, "none")) {
+                error_report("Machine mode firmware is not supported in "
+                             "combination with KVM.");
+                exit(1);
+            }
+        } else {
+            machine->firmware = g_strdup("none");
+        }
     }
 
     if (riscv_is_32bit(&s->soc[0])) {
diff --git a/hw/ssi/ibex_spi_host.c b/hw/ssi/ibex_spi_host.c
new file mode 100644
index 0000000000..d14580b409
--- /dev/null
+++ b/hw/ssi/ibex_spi_host.c
@@ -0,0 +1,612 @@
+/*
+ * QEMU model of the Ibex SPI Controller
+ * SPEC Reference: https://docs.opentitan.org/hw/ip/spi_host/doc/
+ *
+ * Copyright (C) 2022 Western Digital
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "hw/ssi/ibex_spi_host.h"
+#include "hw/irq.h"
+#include "hw/qdev-properties.h"
+#include "hw/qdev-properties-system.h"
+#include "migration/vmstate.h"
+#include "trace.h"
+
+REG32(INTR_STATE, 0x00)
+    FIELD(INTR_STATE, ERROR, 0, 1)
+    FIELD(INTR_STATE, SPI_EVENT, 1, 1)
+REG32(INTR_ENABLE, 0x04)
+    FIELD(INTR_ENABLE, ERROR, 0, 1)
+    FIELD(INTR_ENABLE, SPI_EVENT, 1, 1)
+REG32(INTR_TEST, 0x08)
+    FIELD(INTR_TEST, ERROR, 0, 1)
+    FIELD(INTR_TEST, SPI_EVENT, 1, 1)
+REG32(ALERT_TEST, 0x0c)
+    FIELD(ALERT_TEST, FATAL_TEST, 0, 1)
+REG32(CONTROL, 0x10)
+    FIELD(CONTROL, RX_WATERMARK, 0, 8)
+    FIELD(CONTROL, TX_WATERMARK, 8, 8)
+    FIELD(CONTROL, OUTPUT_EN, 29, 1)
+    FIELD(CONTROL, SW_RST, 30, 1)
+    FIELD(CONTROL, SPIEN, 31, 1)
+REG32(STATUS, 0x14)
+    FIELD(STATUS, TXQD, 0, 8)
+    FIELD(STATUS, RXQD, 8, 8)
+    FIELD(STATUS, CMDQD, 16, 4)
+    FIELD(STATUS, RXWM, 20, 1)
+    FIELD(STATUS, BYTEORDER, 22, 1)
+    FIELD(STATUS, RXSTALL, 23, 1)
+    FIELD(STATUS, RXEMPTY, 24, 1)
+    FIELD(STATUS, RXFULL, 25, 1)
+    FIELD(STATUS, TXWM, 26, 1)
+    FIELD(STATUS, TXSTALL, 27, 1)
+    FIELD(STATUS, TXEMPTY, 28, 1)
+    FIELD(STATUS, TXFULL, 29, 1)
+    FIELD(STATUS, ACTIVE, 30, 1)
+    FIELD(STATUS, READY, 31, 1)
+REG32(CONFIGOPTS, 0x18)
+    FIELD(CONFIGOPTS, CLKDIV_0, 0, 16)
+    FIELD(CONFIGOPTS, CSNIDLE_0, 16, 4)
+    FIELD(CONFIGOPTS, CSNTRAIL_0, 20, 4)
+    FIELD(CONFIGOPTS, CSNLEAD_0, 24, 4)
+    FIELD(CONFIGOPTS, FULLCYC_0, 29, 1)
+    FIELD(CONFIGOPTS, CPHA_0, 30, 1)
+    FIELD(CONFIGOPTS, CPOL_0, 31, 1)
+REG32(CSID, 0x1c)
+    FIELD(CSID, CSID, 0, 32)
+REG32(COMMAND, 0x20)
+    FIELD(COMMAND, LEN, 0, 8)
+    FIELD(COMMAND, CSAAT, 9, 1)
+    FIELD(COMMAND, SPEED, 10, 2)
+    FIELD(COMMAND, DIRECTION, 12, 2)
+REG32(ERROR_ENABLE, 0x2c)
+    FIELD(ERROR_ENABLE, CMDBUSY, 0, 1)
+    FIELD(ERROR_ENABLE, OVERFLOW, 1, 1)
+    FIELD(ERROR_ENABLE, UNDERFLOW, 2, 1)
+    FIELD(ERROR_ENABLE, CMDINVAL, 3, 1)
+    FIELD(ERROR_ENABLE, CSIDINVAL, 4, 1)
+REG32(ERROR_STATUS, 0x30)
+    FIELD(ERROR_STATUS, CMDBUSY, 0, 1)
+    FIELD(ERROR_STATUS, OVERFLOW, 1, 1)
+    FIELD(ERROR_STATUS, UNDERFLOW, 2, 1)
+    FIELD(ERROR_STATUS, CMDINVAL, 3, 1)
+    FIELD(ERROR_STATUS, CSIDINVAL, 4, 1)
+    FIELD(ERROR_STATUS, ACCESSINVAL, 5, 1)
+REG32(EVENT_ENABLE, 0x34)
+    FIELD(EVENT_ENABLE, RXFULL, 0, 1)
+    FIELD(EVENT_ENABLE, TXEMPTY, 1, 1)
+    FIELD(EVENT_ENABLE, RXWM, 2, 1)
+    FIELD(EVENT_ENABLE, TXWM, 3, 1)
+    FIELD(EVENT_ENABLE, READY, 4, 1)
+    FIELD(EVENT_ENABLE, IDLE, 5, 1)
+
+static inline uint8_t div4_round_up(uint8_t dividend)
+{
+    return (dividend + 3) / 4;
+}
+
+static void ibex_spi_rxfifo_reset(IbexSPIHostState *s)
+{
+    /* Empty the RX FIFO and assert RXEMPTY */
+    fifo8_reset(&s->rx_fifo);
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;
+    s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
+}
+
+static void ibex_spi_txfifo_reset(IbexSPIHostState *s)
+{
+    /* Empty the TX FIFO and assert TXEMPTY */
+    fifo8_reset(&s->tx_fifo);
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXFULL_MASK;
+    s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXEMPTY_MASK;
+}
+
+static void ibex_spi_host_reset(DeviceState *dev)
+{
+    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
+    trace_ibex_spi_host_reset("Resetting Ibex SPI");
+
+    /* SPI Host Register Reset */
+    s->regs[IBEX_SPI_HOST_INTR_STATE] = 0x00;
+    s->regs[IBEX_SPI_HOST_INTR_ENABLE] = 0x00;
+    s->regs[IBEX_SPI_HOST_INTR_TEST] = 0x00;
+    s->regs[IBEX_SPI_HOST_ALERT_TEST] = 0x00;
+    s->regs[IBEX_SPI_HOST_CONTROL] = 0x7f;
+    s->regs[IBEX_SPI_HOST_STATUS] = 0x00;
+    s->regs[IBEX_SPI_HOST_CONFIGOPTS] = 0x00;
+    s->regs[IBEX_SPI_HOST_CSID] = 0x00;
+    s->regs[IBEX_SPI_HOST_COMMAND] = 0x00;
+    /* RX/TX Modelled by FIFO */
+    s->regs[IBEX_SPI_HOST_RXDATA] = 0x00;
+    s->regs[IBEX_SPI_HOST_TXDATA] = 0x00;
+
+    s->regs[IBEX_SPI_HOST_ERROR_ENABLE] = 0x1F;
+    s->regs[IBEX_SPI_HOST_ERROR_STATUS] = 0x00;
+    s->regs[IBEX_SPI_HOST_EVENT_ENABLE] = 0x00;
+
+    ibex_spi_rxfifo_reset(s);
+    ibex_spi_txfifo_reset(s);
+
+    s->init_status = true;
+}
+
+/*
+ * Check if we need to trigger an interrupt.
+ * The two interrupt lines (host_err and event) can
+ * be enabled separately in 'IBEX_SPI_HOST_INTR_ENABLE'.
+ *
+ * Whether a given error or event condition actually raises its line
+ * is controlled by 'IBEX_SPI_HOST_ERROR_ENABLE' and
+ * 'IBEX_SPI_HOST_EVENT_ENABLE' respectively.
+ */
+static void ibex_spi_host_irq(IbexSPIHostState *s)
+{
+    bool error_en = s->regs[IBEX_SPI_HOST_INTR_ENABLE]
+                    & R_INTR_ENABLE_ERROR_MASK;
+    bool event_en = s->regs[IBEX_SPI_HOST_INTR_ENABLE]
+                    & R_INTR_ENABLE_SPI_EVENT_MASK;
+    bool err_pending = s->regs[IBEX_SPI_HOST_INTR_STATE]
+                       & R_INTR_STATE_ERROR_MASK;
+    bool status_pending = s->regs[IBEX_SPI_HOST_INTR_STATE]
+                          & R_INTR_STATE_SPI_EVENT_MASK;
+    int err_irq = 0, event_irq = 0;
+
+    /* Error IRQ enabled and Error IRQ cleared */
+    if (error_en && !err_pending) {
+        /* Event enabled, Interrupt Test Error */
+        if (s->regs[IBEX_SPI_HOST_INTR_TEST] & R_INTR_TEST_ERROR_MASK) {
+            err_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
+                    & R_ERROR_ENABLE_CMDBUSY_MASK) &&
+                   s->regs[IBEX_SPI_HOST_ERROR_STATUS]
+                   & R_ERROR_STATUS_CMDBUSY_MASK) {
+            /* Wrote to COMMAND when not READY */
+            err_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
+                    & R_ERROR_ENABLE_CMDINVAL_MASK) &&
+                   s->regs[IBEX_SPI_HOST_ERROR_STATUS]
+                   & R_ERROR_STATUS_CMDINVAL_MASK) {
+            /* Invalid command segment */
+            err_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_ERROR_ENABLE]
+                    & R_ERROR_ENABLE_CSIDINVAL_MASK) &&
+                   s->regs[IBEX_SPI_HOST_ERROR_STATUS]
+                   & R_ERROR_STATUS_CSIDINVAL_MASK) {
+            /* Invalid value for CSID */
+            err_irq = 1;
+        }
+        if (err_irq) {
+            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_ERROR_MASK;
+        }
+        qemu_set_irq(s->host_err, err_irq);
+    }
+
+    /* Event IRQ enabled and Event IRQ cleared */
+    if (event_en && !status_pending) {
+        if (s->regs[IBEX_SPI_HOST_INTR_TEST] & R_INTR_TEST_SPI_EVENT_MASK) {
+            /* Event enabled, Interrupt Test Event */
+            event_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
+                    & R_EVENT_ENABLE_READY_MASK) &&
+                   (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_READY_MASK)) {
+            /* SPI Host ready for next command */
+            event_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
+                    & R_EVENT_ENABLE_TXEMPTY_MASK) &&
+                   (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_TXEMPTY_MASK)) {
+            /* SPI TXEMPTY, TXFIFO drained */
+            event_irq = 1;
+        } else if ((s->regs[IBEX_SPI_HOST_EVENT_ENABLE]
+                    & R_EVENT_ENABLE_RXFULL_MASK) &&
+                   (s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_RXFULL_MASK)) {
+            /* SPI RXFULL, RXFIFO full */
+            event_irq = 1;
+        }
+        if (event_irq) {
+            s->regs[IBEX_SPI_HOST_INTR_STATE] |= R_INTR_STATE_SPI_EVENT_MASK;
+        }
+        qemu_set_irq(s->event, event_irq);
+    }
+}
+
+static void ibex_spi_host_transfer(IbexSPIHostState *s)
+{
+    uint32_t rx, tx;
+    /* Get the number of one-byte transfers */
+    uint8_t segment_len = ((s->regs[IBEX_SPI_HOST_COMMAND] & R_COMMAND_LEN_MASK)
+                           >> R_COMMAND_LEN_SHIFT);
+    while (segment_len > 0) {
+        if (fifo8_is_empty(&s->tx_fifo)) {
+            /* Assert Stall */
+            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXSTALL_MASK;
+            break;
+        } else if (fifo8_is_full(&s->rx_fifo)) {
+            /* Assert Stall */
+            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXSTALL_MASK;
+            break;
+        } else {
+            tx = fifo8_pop(&s->tx_fifo);
+        }
+
+        rx = ssi_transfer(s->ssi, tx);
+
+        trace_ibex_spi_host_transfer(tx, rx);
+
+        if (!fifo8_is_full(&s->rx_fifo)) {
+            fifo8_push(&s->rx_fifo, rx);
+        } else {
+            /* Assert RXFULL */
+            s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXFULL_MASK;
+        }
+        --segment_len;
+    }
+
+    /* Assert Ready */
+    s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_READY_MASK;
+    /* Set RXQD (counted in 32-bit words, rounded up) */
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXQD_MASK;
+    s->regs[IBEX_SPI_HOST_STATUS] |= (div4_round_up(segment_len)
+                                      << R_STATUS_RXQD_SHIFT)
+                                     & R_STATUS_RXQD_MASK;
+    /* Set TXQD */
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXQD_MASK;
+    s->regs[IBEX_SPI_HOST_STATUS] |= (fifo8_num_used(&s->tx_fifo) / 4)
+                                     & R_STATUS_TXQD_MASK;
+    /* Clear TXFULL */
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXFULL_MASK;
+    /* Assert TXEMPTY and drop remaining bytes that exceed segment_len */
+    ibex_spi_txfifo_reset(s);
+    /* Clear RXEMPTY */
+    s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXEMPTY_MASK;
+
+    ibex_spi_host_irq(s);
+}
+
+static uint64_t ibex_spi_host_read(void *opaque, hwaddr addr,
+                                   unsigned int size)
+{
+    IbexSPIHostState *s = opaque;
+    uint32_t rc = 0;
+    uint8_t rx_byte = 0;
+
+    trace_ibex_spi_host_read(addr, size);
+
+    /* Match reg index */
+    addr = addr >> 2;
+    switch (addr) {
+    /* Skipping any W/O registers */
+    case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
+    case IBEX_SPI_HOST_CONTROL...IBEX_SPI_HOST_STATUS:
+        rc = s->regs[addr];
+        break;
+    case IBEX_SPI_HOST_CSID:
+        rc = s->regs[addr];
+        break;
+    case IBEX_SPI_HOST_CONFIGOPTS:
+        rc = s->config_opts[s->regs[IBEX_SPI_HOST_CSID]];
+        break;
+    case IBEX_SPI_HOST_TXDATA:
+        rc = s->regs[addr];
+        break;
+    case IBEX_SPI_HOST_RXDATA:
+        /* Clear RXFULL */
+        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_RXFULL_MASK;
+
+        for (int i = 0; i < 4; ++i) {
+            if (fifo8_is_empty(&s->rx_fifo)) {
+                /* Assert RXEMPTY, no IRQ */
+                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_RXEMPTY_MASK;
+                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
+                                                R_ERROR_STATUS_UNDERFLOW_MASK;
+                return rc;
+            }
+            rx_byte = fifo8_pop(&s->rx_fifo);
+            rc |= rx_byte << (i * 8);
+        }
+        break;
+    case IBEX_SPI_HOST_ERROR_ENABLE...IBEX_SPI_HOST_EVENT_ENABLE:
+        rc = s->regs[addr];
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
+                      addr << 2);
+    }
+    return rc;
+}
+
+static void ibex_spi_host_write(void *opaque, hwaddr addr,
+                                uint64_t val64, unsigned int size)
+{
+    IbexSPIHostState *s = opaque;
+    uint32_t val32 = val64;
+    uint32_t shift_mask = 0xff;
+    uint8_t txqd_len;
+
+    trace_ibex_spi_host_write(addr, size, val64);
+
+    /* Match reg index */
+    addr = addr >> 2;
+
+    switch (addr) {
+    /* Skipping any R/O registers */
+    case IBEX_SPI_HOST_INTR_STATE...IBEX_SPI_HOST_INTR_ENABLE:
+        s->regs[addr] = val32;
+        break;
+    case IBEX_SPI_HOST_INTR_TEST:
+        s->regs[addr] = val32;
+        ibex_spi_host_irq(s);
+        break;
+    case IBEX_SPI_HOST_ALERT_TEST:
+        s->regs[addr] = val32;
+        qemu_log_mask(LOG_UNIMP,
+                      "%s: SPI_ALERT_TEST is not supported\n", __func__);
+        break;
+    case IBEX_SPI_HOST_CONTROL:
+        s->regs[addr] = val32;
+
+        if (val32 & R_CONTROL_SW_RST_MASK) {
+            ibex_spi_host_reset((DeviceState *)s);
+            /* Clear active if any */
+            s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_ACTIVE_MASK;
+        }
+
+        if (val32 & R_CONTROL_OUTPUT_EN_MASK) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: CONTROL_OUTPUT_EN is not supported\n", __func__);
+        }
+        break;
+    case IBEX_SPI_HOST_CONFIGOPTS:
+        /* Update the CONFIGOPTS multi-register entry selected by CSID */
+        s->config_opts[s->regs[IBEX_SPI_HOST_CSID]] = val32;
+        qemu_log_mask(LOG_UNIMP,
+                      "%s: CONFIGOPTS Hardware settings not supported\n",
+                      __func__);
+        break;
+    case IBEX_SPI_HOST_CSID:
+        if (val32 >= s->num_cs) {
+            /* CSID exceeds max num_cs */
+            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
+                                                R_ERROR_STATUS_CSIDINVAL_MASK;
+            ibex_spi_host_irq(s);
+            return;
+        }
+        s->regs[addr] = val32;
+        break;
+    case IBEX_SPI_HOST_COMMAND:
+        s->regs[addr] = val32;
+
+        /* STALL, IP not enabled */
+        if (!(s->regs[IBEX_SPI_HOST_CONTROL] & R_CONTROL_SPIEN_MASK)) {
+            return;
+        }
+
+        /* SPI not ready, IRQ Error */
+        if (!(s->regs[IBEX_SPI_HOST_STATUS] & R_STATUS_READY_MASK)) {
+            s->regs[IBEX_SPI_HOST_ERROR_STATUS] |= R_ERROR_STATUS_CMDBUSY_MASK;
+            ibex_spi_host_irq(s);
+            return;
+        }
+        /* Assert Not Ready */
+        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_READY_MASK;
+
+        if (((val32 & R_COMMAND_DIRECTION_MASK) >> R_COMMAND_DIRECTION_SHIFT)
+            != BIDIRECTIONAL_TRANSFER) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: Rx Only/Tx Only are not supported\n", __func__);
+        }
+
+        if (val32 & R_COMMAND_CSAAT_MASK) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: CSAAT is not supported\n", __func__);
+        }
+        if (val32 & R_COMMAND_SPEED_MASK) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: SPEED is not supported\n", __func__);
+        }
+
+        /* Set Transfer Callback */
+        timer_mod(s->fifo_trigger_handle,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+                  (TX_INTERRUPT_TRIGGER_DELAY_NS));
+
+        break;
+    case IBEX_SPI_HOST_TXDATA:
+        /*
+         * This is a hardware `feature` where the first word
+         * written to TXDATA after init is omitted entirely.
+         */
+        if (s->init_status) {
+            s->init_status = false;
+            return;
+        }
+
+        for (int i = 0; i < 4; ++i) {
+            /* Attempting to write when TXFULL */
+            if (fifo8_is_full(&s->tx_fifo)) {
+                /* Assert TXFULL and raise an error IRQ */
+                s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_TXFULL_MASK;
+                s->regs[IBEX_SPI_HOST_ERROR_STATUS] |=
+                                                 R_ERROR_STATUS_OVERFLOW_MASK;
+                ibex_spi_host_irq(s);
+                return;
+            }
+            /* Byte ordering is set by the IP */
+            if ((s->regs[IBEX_SPI_HOST_STATUS] &
+                 R_STATUS_BYTEORDER_MASK) == 0) {
+                /* LE: LSB transmitted first (default for ibex processor) */
+                shift_mask = 0xff << (i * 8);
+            } else {
+                /* BE: MSB transmitted first */
+                qemu_log_mask(LOG_UNIMP,
+                              "%s: Big endian is not supported\n", __func__);
+            }
+
+            fifo8_push(&s->tx_fifo, (val32 & shift_mask) >> (i * 8));
+        }
+
+        /* Clear TXEMPTY */
+        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXEMPTY_MASK;
+        /* Update TXQD */
+        txqd_len = (s->regs[IBEX_SPI_HOST_STATUS] &
+                    R_STATUS_TXQD_MASK) >> R_STATUS_TXQD_SHIFT;
+        /* Each TXDATA write queues one word; partial words are padded. */
+        txqd_len += 1;
+        s->regs[IBEX_SPI_HOST_STATUS] &= ~R_STATUS_TXQD_MASK;
+        s->regs[IBEX_SPI_HOST_STATUS] |= txqd_len;
+        /* Assert Ready */
+        s->regs[IBEX_SPI_HOST_STATUS] |= R_STATUS_READY_MASK;
+        break;
+    case IBEX_SPI_HOST_ERROR_ENABLE:
+        s->regs[addr] = val32;
+
+        if (val32 & R_ERROR_ENABLE_CMDINVAL_MASK) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: Segment Length is not supported\n", __func__);
+        }
+        break;
+    case IBEX_SPI_HOST_ERROR_STATUS:
+        /*
+         * Indicates any errors that have occurred.
+         * When an error occurs, the corresponding bit must be cleared
+         * here (write-1-to-clear) before issuing any further commands.
+         */
+        s->regs[addr] &= ~val32;
+        break;
+    case IBEX_SPI_HOST_EVENT_ENABLE:
+        /* Controls which classes of SPI events raise an interrupt. */
+        s->regs[addr] = val32;
+
+        if (val32 & R_EVENT_ENABLE_RXWM_MASK) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: RXWM is not supported\n", __func__);
+        }
+        if (val32 & R_EVENT_ENABLE_TXWM_MASK) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: TXWM is not supported\n", __func__);
+        }
+
+        if (val32 & R_EVENT_ENABLE_IDLE_MASK) {
+            qemu_log_mask(LOG_UNIMP,
+                          "%s: IDLE is not supported\n", __func__);
+        }
+        break;
+    default:
+        qemu_log_mask(LOG_GUEST_ERROR, "Bad offset 0x%" HWADDR_PRIx "\n",
+                      addr << 2);
+    }
+}
+
+static const MemoryRegionOps ibex_spi_ops = {
+    .read = ibex_spi_host_read,
+    .write = ibex_spi_host_write,
+    /* Ibex default LE */
+    .endianness = DEVICE_LITTLE_ENDIAN,
+};
+
+static Property ibex_spi_properties[] = {
+    DEFINE_PROP_UINT32("num_cs", IbexSPIHostState, num_cs, 1),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static const VMStateDescription vmstate_ibex = {
+    .name = TYPE_IBEX_SPI_HOST,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(regs, IbexSPIHostState, IBEX_SPI_HOST_MAX_REGS),
+        VMSTATE_VARRAY_UINT32(config_opts, IbexSPIHostState,
+                              num_cs, 0, vmstate_info_uint32, uint32_t),
+        VMSTATE_FIFO8(rx_fifo, IbexSPIHostState),
+        VMSTATE_FIFO8(tx_fifo, IbexSPIHostState),
+        VMSTATE_TIMER_PTR(fifo_trigger_handle, IbexSPIHostState),
+        VMSTATE_BOOL(init_status, IbexSPIHostState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void fifo_trigger_update(void *opaque)
+{
+    IbexSPIHostState *s = opaque;
+    ibex_spi_host_transfer(s);
+}
+
+static void ibex_spi_host_realize(DeviceState *dev, Error **errp)
+{
+    IbexSPIHostState *s = IBEX_SPI_HOST(dev);
+    int i;
+
+    s->ssi = ssi_create_bus(dev, "ssi");
+    s->cs_lines = g_new0(qemu_irq, s->num_cs);
+
+    for (i = 0; i < s->num_cs; ++i) {
+        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->cs_lines[i]);
+    }
+
+    /* Setup CONFIGOPTS Multi-register */
+    s->config_opts = g_new0(uint32_t, s->num_cs);
+
+    /* Setup FIFO Interrupt Timer */
+    s->fifo_trigger_handle = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                          fifo_trigger_update, s);
+
+    /* FIFO sizes as per OT Spec */
+    fifo8_create(&s->tx_fifo, IBEX_SPI_HOST_TXFIFO_LEN);
+    fifo8_create(&s->rx_fifo, IBEX_SPI_HOST_RXFIFO_LEN);
+}
+
+static void ibex_spi_host_init(Object *obj)
+{
+    IbexSPIHostState *s = IBEX_SPI_HOST(obj);
+
+    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->host_err);
+    sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->event);
+
+    memory_region_init_io(&s->mmio, obj, &ibex_spi_ops, s,
+                          TYPE_IBEX_SPI_HOST, 0x1000);
+    sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio);
+}
+
+static void ibex_spi_host_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+    dc->realize = ibex_spi_host_realize;
+    dc->reset = ibex_spi_host_reset;
+    dc->vmsd = &vmstate_ibex;
+    device_class_set_props(dc, ibex_spi_properties);
+}
+
+static const TypeInfo ibex_spi_host_info = {
+    .name = TYPE_IBEX_SPI_HOST,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(IbexSPIHostState),
+    .instance_init = ibex_spi_host_init,
+    .class_init = ibex_spi_host_class_init,
+};
+
+static void ibex_spi_host_register_types(void)
+{
+    type_register_static(&ibex_spi_host_info);
+}
+
+type_init(ibex_spi_host_register_types)
diff --git a/hw/ssi/meson.build b/hw/ssi/meson.build
index 0ded9cd092..702aa5e4df 100644
--- a/hw/ssi/meson.build
+++ b/hw/ssi/meson.build
@@ -10,3 +10,4 @@ softmmu_ss.add(when: 'CONFIG_XILINX_SPIPS', if_true: files('xilinx_spips.c'))
 softmmu_ss.add(when: 'CONFIG_XLNX_VERSAL', if_true: files('xlnx-versal-ospi.c'))
 softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_spi.c'))
 softmmu_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_spi.c'))
+softmmu_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_spi_host.c'))
diff --git a/hw/ssi/trace-events b/hw/ssi/trace-events
index 612d3d6087..c707d4aaba 100644
--- a/hw/ssi/trace-events
+++ b/hw/ssi/trace-events
@@ -20,3 +20,10 @@ npcm7xx_fiu_ctrl_read(const char *id, uint64_t addr, uint32_t data) "%s offset:
 npcm7xx_fiu_ctrl_write(const char *id, uint64_t addr, uint32_t data) "%s offset: 0x%04" PRIx64 " value: 0x%08" PRIx32
 npcm7xx_fiu_flash_read(const char *id, int cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64
 npcm7xx_fiu_flash_write(const char *id, unsigned cs, uint64_t addr, unsigned int size, uint64_t value) "%s[%d] offset: 0x%08" PRIx64 " size: %u value: 0x%" PRIx64
+
+# ibex_spi_host.c
+
+ibex_spi_host_reset(const char *msg) "%s"
+ibex_spi_host_transfer(uint32_t tx_data, uint32_t rx_data) "tx_data: 0x%" PRIx32 " rx_data: 0x%" PRIx32
+ibex_spi_host_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64
+ibex_spi_host_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size %u:"
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index a5102eac9e..333348d9d5 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -25,6 +25,8 @@ vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"
 # vhost-vdpa.c
 vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
 vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
+vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
+vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
 vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
 vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64
 vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index b57be529c7..a30510ed17 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -129,6 +129,7 @@ static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
     };
 
+    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
                      fd, errno, strerror(errno));
@@ -163,6 +164,7 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener)
     msg.type = v->msg_type;
     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
 
+    trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
                      fd, errno, strerror(errno));
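
With the 10m50-ghrd board converted to its own QOM machine type above, the new Vectored Interrupt Controller is selected through the machine-level "vic" property, e.g. "-machine 10m50-ghrd,vic=on" on the QEMU command line (assuming TYPE_NIOS2_MACHINE keeps the existing "10m50-ghrd" machine name). When the property is off, the board falls back to wiring the 32 IRQ lines directly into the CPU, as before.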
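
To make the riscv_load_fdt() change concrete, here is the placement arithmetic extracted as a minimal standalone sketch. The helper names are hypothetical; the 3 GiB clamp and 16 MB alignment mirror the patch. The FDT is kept below 3 GiB so 32-bit firmware can still address it, except when DRAM itself starts at or above 3 GiB, in which case the end of DRAM is used:

    #include <stdint.h>

    #define MIB (1024 * 1024ULL)

    /* Align x down to a multiple of a. */
    static uint64_t align_down(uint64_t x, uint64_t a)
    {
        return x - (x % a);
    }

    /* Mirrors the fdt_addr computation in riscv_load_fdt() above. */
    static uint64_t fdt_place(uint64_t dram_base, uint64_t mem_size,
                              uint64_t fdtsize)
    {
        uint64_t dram_end = dram_base + mem_size;
        uint64_t limit = 3072 * MIB;
        uint64_t temp = (dram_base < limit)
                      ? (dram_end < limit ? dram_end : limit)
                      : dram_end;
        return align_down(temp - fdtsize, 16 * MIB);
    }

For example, with dram_base at 2 GiB and 2 GiB of RAM, dram_end is 4 GiB, temp clamps to 3 GiB, and the FDT lands on a 16 MB boundary just under 3 GiB; the widened uint64_t return type is what allows the result to exceed 32 bits on configurations where DRAM starts above 3 GiB.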
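
The ibex_spi_host model is driven entirely through its memory-mapped registers, so a minimal bare-metal transfer against it looks like the sketch below. This is an illustration only: the base address is hypothetical, and the RXDATA/TXDATA offsets and the DIRECTION encoding are taken from the OpenTitan spi_host spec linked in the file header rather than from code shown in this patch. Note the modelled quirk called out in the write handler: the first word written to TXDATA after reset is dropped (init_status), so the init routine queues a dummy word first.

    #include <stdint.h>

    #define SPI_HOST_BASE 0x40300000u             /* hypothetical base address */
    #define SPI_REG(off)  (*(volatile uint32_t *)(SPI_HOST_BASE + (off)))

    #define SPI_CONTROL 0x10u   /* SPIEN is bit 31 in the model above */
    #define SPI_STATUS  0x14u   /* READY is bit 31 */
    #define SPI_COMMAND 0x20u   /* LEN in bits 0-7, DIRECTION in bits 12-13 */
    #define SPI_RXDATA  0x24u   /* RXDATA/TXDATA offsets per the OpenTitan spec */
    #define SPI_TXDATA  0x28u

    static void spi_host_init(void)
    {
        SPI_REG(SPI_CONTROL) = 1u << 31;   /* CONTROL.SPIEN: enable the IP */
        /* The model drops the first TXDATA word written after reset
         * (init_status), so queue one dummy word up front. */
        SPI_REG(SPI_TXDATA) = 0;
    }

    /* Clock one 32-bit word out and return the word clocked in. */
    static uint32_t spi_xfer_word(uint32_t word)
    {
        SPI_REG(SPI_TXDATA) = word;               /* queue 4 bytes */
        /* LEN = 4: the model consumes COMMAND.LEN bytes from the TX FIFO;
         * DIRECTION = 3 selects the bidirectional mode it implements. */
        SPI_REG(SPI_COMMAND) = (3u << 12) | 4u;
        while (!(SPI_REG(SPI_STATUS) & (1u << 31))) {
            /* spin until STATUS.READY reasserts */
        }
        return SPI_REG(SPI_RXDATA);               /* pop 4 received bytes */
    }

One design point worth noting: writing COMMAND does not perform the transfer synchronously; it arms a QEMU_CLOCK_VIRTUAL timer whose callback (fifo_trigger_update) runs ibex_spi_host_transfer() and reasserts READY, which is why the sketch polls STATUS rather than assuming the data is available immediately after the COMMAND write.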