Diffstat (limited to 'hw/intc')
-rw-r--r--  hw/intc/arm_gic_common.c        |   2
-rw-r--r--  hw/intc/arm_gic_kvm.c           |   4
-rw-r--r--  hw/intc/arm_gicv3.c             |  67
-rw-r--r--  hw/intc/arm_gicv3_common.c      |  50
-rw-r--r--  hw/intc/arm_gicv3_cpuif.c       | 268
-rw-r--r--  hw/intc/arm_gicv3_dist.c        |  36
-rw-r--r--  hw/intc/arm_gicv3_its.c         |   4
-rw-r--r--  hw/intc/arm_gicv3_its_common.c  |   2
-rw-r--r--  hw/intc/arm_gicv3_its_kvm.c     |   4
-rw-r--r--  hw/intc/arm_gicv3_kvm.c         |   9
-rw-r--r--  hw/intc/arm_gicv3_redist.c      |  22
-rw-r--r--  hw/intc/gicv3_internal.h        |  13
-rw-r--r--  hw/intc/trace-events            |   2
-rw-r--r--  hw/intc/xics.c                  |   2
14 files changed, 446 insertions, 39 deletions
diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c
index 94c173cb07..53fb2c4e2d 100644
--- a/hw/intc/arm_gic_common.c
+++ b/hw/intc/arm_gic_common.c
@@ -263,7 +263,7 @@ static inline void arm_gic_common_reset_irq_state(GICState *s, int cidx,
     }
 }
 
-static void arm_gic_common_reset_hold(Object *obj)
+static void arm_gic_common_reset_hold(Object *obj, ResetType type)
 {
     GICState *s = ARM_GIC_COMMON(obj);
     int i, j;
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index e0d9e512a3..53defee7d5 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -473,13 +473,13 @@ static void kvm_arm_gic_get(GICState *s)
     }
 }
 
-static void kvm_arm_gic_reset_hold(Object *obj)
+static void kvm_arm_gic_reset_hold(Object *obj, ResetType type)
 {
     GICState *s = ARM_GIC_COMMON(obj);
     KVMARMGICClass *kgc = KVM_ARM_GIC_GET_CLASS(s);
 
     if (kgc->parent_phases.hold) {
-        kgc->parent_phases.hold(obj);
+        kgc->parent_phases.hold(obj, type);
     }
 
     if (kvm_arm_gic_can_save_restore(s)) {
diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c
index 0b8f79a122..58e18fff54 100644
--- a/hw/intc/arm_gicv3.c
+++ b/hw/intc/arm_gicv3.c
@@ -21,7 +21,7 @@
 #include "hw/intc/arm_gicv3.h"
 #include "gicv3_internal.h"
 
-static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
+static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio, bool nmi)
 {
     /* Return true if this IRQ at this priority should take
      * precedence over the current recorded highest priority
@@ -30,14 +30,23 @@ static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
      * is the same as this one (a property which the calling code
      * relies on).
      */
-    if (prio < cs->hppi.prio) {
-        return true;
+    if (prio != cs->hppi.prio) {
+        return prio < cs->hppi.prio;
+    }
+
+    /*
+     * The same priority IRQ with non-maskable property should signal to
+     * the CPU as it have the priority higher than the labelled 0x80 or 0x00.
+     */
+    if (nmi != cs->hppi.nmi) {
+        return nmi;
     }
+
     /* If multiple pending interrupts have the same priority then it is an
      * IMPDEF choice which of them to signal to the CPU. We choose to
      * signal the one with the lowest interrupt number.
      */
-    if (prio == cs->hppi.prio && irq <= cs->hppi.irq) {
+    if (irq <= cs->hppi.irq) {
         return true;
     }
     return false;
@@ -129,6 +138,40 @@ static uint32_t gicr_int_pending(GICv3CPUState *cs)
     return pend;
 }
 
+static bool gicv3_get_priority(GICv3CPUState *cs, bool is_redist, int irq,
+                               uint8_t *prio)
+{
+    uint32_t nmi = 0x0;
+
+    if (is_redist) {
+        nmi = extract32(cs->gicr_inmir0, irq, 1);
+    } else {
+        nmi = *gic_bmp_ptr32(cs->gic->nmi, irq);
+        nmi = nmi & (1 << (irq & 0x1f));
+    }
+
+    if (nmi) {
+        /* DS = 0 & Non-secure NMI */
+        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
+            ((is_redist && extract32(cs->gicr_igroupr0, irq, 1)) ||
+             (!is_redist && gicv3_gicd_group_test(cs->gic, irq)))) {
+            *prio = 0x80;
+        } else {
+            *prio = 0x0;
+        }
+
+        return true;
+    }
+
+    if (is_redist) {
+        *prio = cs->gicr_ipriorityr[irq];
+    } else {
+        *prio = cs->gic->gicd_ipriority[irq];
+    }
+
+    return false;
+}
+
 /* Update the interrupt status after state in a redistributor
  * or CPU interface has changed, but don't tell the CPU i/f.
  */
@@ -141,6 +184,7 @@ static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
     uint8_t prio;
     int i;
     uint32_t pend;
+    bool nmi = false;
 
     /* Find out which redistributor interrupts are eligible to be
      * signaled to the CPU interface.
@@ -152,10 +196,11 @@ static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
         if (!(pend & (1 << i))) {
             continue;
         }
-        prio = cs->gicr_ipriorityr[i];
-        if (irqbetter(cs, i, prio)) {
+        nmi = gicv3_get_priority(cs, true, i, &prio);
+        if (irqbetter(cs, i, prio, nmi)) {
             cs->hppi.irq = i;
             cs->hppi.prio = prio;
+            cs->hppi.nmi = nmi;
             seenbetter = true;
         }
     }
@@ -168,9 +213,10 @@ static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
     if ((cs->gicr_ctlr & GICR_CTLR_ENABLE_LPIS) && cs->gic->lpi_enable &&
         (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) &&
         (cs->hpplpi.prio != 0xff)) {
-        if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio)) {
+        if (irqbetter(cs, cs->hpplpi.irq, cs->hpplpi.prio, cs->hpplpi.nmi)) {
             cs->hppi.irq = cs->hpplpi.irq;
             cs->hppi.prio = cs->hpplpi.prio;
+            cs->hppi.nmi = cs->hpplpi.nmi;
             cs->hppi.grp = cs->hpplpi.grp;
             seenbetter = true;
         }
@@ -213,6 +259,7 @@ static void gicv3_update_noirqset(GICv3State *s, int start, int len)
     int i;
     uint8_t prio;
     uint32_t pend = 0;
+    bool nmi = false;
 
     assert(start >= GIC_INTERNAL);
     assert(len > 0);
@@ -240,10 +287,11 @@ static void gicv3_update_noirqset(GICv3State *s, int start, int len)
              */
            continue;
        }
-        prio = s->gicd_ipriority[i];
-        if (irqbetter(cs, i, prio)) {
+        nmi = gicv3_get_priority(cs, false, i, &prio);
+        if (irqbetter(cs, i, prio, nmi)) {
             cs->hppi.irq = i;
             cs->hppi.prio = prio;
+            cs->hppi.nmi = nmi;
             cs->seenbetter = true;
         }
     }
@@ -293,6 +341,7 @@ void gicv3_full_update_noirqset(GICv3State *s)
 
     for (i = 0; i < s->num_cpu; i++) {
         s->cpu[i].hppi.prio = 0xff;
+        s->cpu[i].hppi.nmi = false;
     }
 
     /* Note that we can guarantee that these functions will not
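The comparison above decides, for each pending interrupt, whether it displaces the currently recorded highest priority pending interrupt: a numerically lower priority always wins, an NMI wins a tie against a non-NMI, and among equals the lowest INTID wins; gicv3_get_priority() additionally reports the effective priority of an NMI as 0x80 (Non-secure Group 1 with GICD_CTLR.DS == 0) or 0x00. The following standalone sketch is not part of the commit; it only restates that selection rule with hypothetical names so it can be compiled and run on its own.

/*
 * Standalone sketch (not QEMU code) of the selection rule implemented by
 * irqbetter()/gicv3_get_priority() above.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct candidate {
    int irq;
    uint8_t prio;   /* 0x80 or 0x00 for NMIs, per gicv3_get_priority() */
    bool nmi;
};

static bool better(const struct candidate *a, const struct candidate *best)
{
    if (a->prio != best->prio) {
        return a->prio < best->prio;      /* lower value = higher priority */
    }
    if (a->nmi != best->nmi) {
        return a->nmi;                    /* NMI wins a priority tie */
    }
    return a->irq <= best->irq;           /* lowest INTID wins among equals */
}

int main(void)
{
    struct candidate best = { .irq = 40, .prio = 0x80, .nmi = false };
    struct candidate spi  = { .irq = 72, .prio = 0x80, .nmi = true };

    /* Same priority value, but the NMI takes precedence despite its higher INTID */
    printf("NMI wins the tie: %s\n", better(&spi, &best) ? "yes" : "no");
    return 0;
}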
diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c
index cb55c72681..bd50a1b079 100644
--- a/hw/intc/arm_gicv3_common.c
+++ b/hw/intc/arm_gicv3_common.c
@@ -164,6 +164,24 @@ const VMStateDescription vmstate_gicv3_gicv4 = {
     }
 };
 
+static bool gicv3_cpu_nmi_needed(void *opaque)
+{
+    GICv3CPUState *cs = opaque;
+
+    return cs->gic->nmi_support;
+}
+
+static const VMStateDescription vmstate_gicv3_cpu_nmi = {
+    .name = "arm_gicv3_cpu/nmi",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = gicv3_cpu_nmi_needed,
+    .fields = (const VMStateField[]) {
+        VMSTATE_UINT32(gicr_inmir0, GICv3CPUState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static const VMStateDescription vmstate_gicv3_cpu = {
     .name = "arm_gicv3_cpu",
     .version_id = 1,
@@ -196,6 +214,7 @@ static const VMStateDescription vmstate_gicv3_cpu = {
         &vmstate_gicv3_cpu_virt,
         &vmstate_gicv3_cpu_sre_el1,
         &vmstate_gicv3_gicv4,
+        &vmstate_gicv3_cpu_nmi,
         NULL
     }
 };
@@ -238,6 +257,24 @@ const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
     }
 };
 
+static bool gicv3_nmi_needed(void *opaque)
+{
+    GICv3State *cs = opaque;
+
+    return cs->nmi_support;
+}
+
+const VMStateDescription vmstate_gicv3_gicd_nmi = {
+    .name = "arm_gicv3/gicd_nmi",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = gicv3_nmi_needed,
+    .fields = (const VMStateField[]) {
+        VMSTATE_UINT32_ARRAY(nmi, GICv3State, GICV3_BMP_SIZE),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static const VMStateDescription vmstate_gicv3 = {
     .name = "arm_gicv3",
     .version_id = 1,
@@ -266,6 +303,7 @@ static const VMStateDescription vmstate_gicv3 = {
     },
     .subsections = (const VMStateDescription * const []) {
         &vmstate_gicv3_gicd_no_migration_shift_bug,
+        &vmstate_gicv3_gicd_nmi,
         NULL
     }
 };
@@ -299,6 +337,12 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
     for (i = 0; i < s->num_cpu; i++) {
         sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
     }
+    for (i = 0; i < s->num_cpu; i++) {
+        sysbus_init_irq(sbd, &s->cpu[i].parent_nmi);
+    }
+    for (i = 0; i < s->num_cpu; i++) {
+        sysbus_init_irq(sbd, &s->cpu[i].parent_vnmi);
+    }
 
     memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
                           "gicv3_dist", 0x10000);
@@ -451,7 +495,7 @@ static void arm_gicv3_finalize(Object *obj)
     g_free(s->redist_region_count);
 }
 
-static void arm_gicv3_common_reset_hold(Object *obj)
+static void arm_gicv3_common_reset_hold(Object *obj, ResetType type)
 {
     GICv3State *s = ARM_GICV3_COMMON(obj);
     int i;
@@ -492,8 +536,11 @@ static void arm_gicv3_common_reset_hold(Object *obj)
         memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));
 
         cs->hppi.prio = 0xff;
+        cs->hppi.nmi = false;
         cs->hpplpi.prio = 0xff;
+        cs->hpplpi.nmi = false;
         cs->hppvlpi.prio = 0xff;
+        cs->hppvlpi.nmi = false;
 
         /* State in the CPU interface must *not* be reset here, because it
          * is part of the CPU's reset domain, not the GIC device's.
@@ -563,6 +610,7 @@ static Property arm_gicv3_common_properties[] = {
     DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
     DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
     DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
+    DEFINE_PROP_BOOL("has-nmi", GICv3State, nmi_support, 0),
     DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
     /*
      * Compatibility property: force 8 bits of physical priority, even
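The GIC now exposes a "has-nmi" switch plus two extra banks of sysbus output lines (parent_nmi and parent_vnmi), registered after the existing IRQ, FIQ, VIRQ and VFIQ banks. How a board wires them up is outside this diff; the sketch below is only an assumption of what that glue might look like using the generic qdev/sysbus helpers. The output index 4 * num_cpu + n follows from the registration order in gicv3_init_irqs_and_mmio() above, and ARM_CPU_NMI is assumed to be the CPU-side NMI input added by the wider FEAT_NMI work (it is not defined in this diff).

#include "qemu/osdep.h"
#include "hw/qdev-properties.h"
#include "hw/sysbus.h"
#include "target/arm/cpu.h"     /* assumed location of ARM_CPU_NMI */

/* Hypothetical board glue, not part of this commit. */
static void board_wire_gicv3_nmi(DeviceState *gicdev, DeviceState *cpudev,
                                 int num_cpu, int n)
{
    /* Opt in to the NMI registers and lines; must happen before realize */
    qdev_prop_set_bit(gicdev, "has-nmi", true);

    /* ... realize the GIC here, then connect CPU n's NMI output line ... */
    sysbus_connect_irq(SYS_BUS_DEVICE(gicdev), 4 * num_cpu + n,
                       qdev_get_gpio_in(cpudev, ARM_CPU_NMI));
}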
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 67d8fd07b7..bdb13b00e9 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -21,6 +21,7 @@
 #include "hw/irq.h"
 #include "cpu.h"
 #include "target/arm/cpregs.h"
+#include "target/arm/cpu-features.h"
 #include "sysemu/tcg.h"
 #include "sysemu/qtest.h"
 
@@ -157,6 +158,10 @@ static int ich_highest_active_virt_prio(GICv3CPUState *cs)
     int i;
     int aprmax = ich_num_aprs(cs);
 
+    if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
+        return 0x0;
+    }
+
     for (i = 0; i < aprmax; i++) {
         uint32_t apr = cs->ich_apr[GICV3_G0][i] |
             cs->ich_apr[GICV3_G1NS][i];
@@ -191,6 +196,7 @@ static int hppvi_index(GICv3CPUState *cs)
      * correct behaviour.
      */
     int prio = 0xff;
+    bool nmi = false;
 
     if (!(cs->ich_vmcr_el2 & (ICH_VMCR_EL2_VENG0 | ICH_VMCR_EL2_VENG1))) {
         /* Both groups disabled, definitely nothing to do */
@@ -199,6 +205,7 @@ static int hppvi_index(GICv3CPUState *cs)
 
     for (i = 0; i < cs->num_list_regs; i++) {
         uint64_t lr = cs->ich_lr_el2[i];
+        bool thisnmi;
         int thisprio;
 
         if (ich_lr_state(lr) != ICH_LR_EL2_STATE_PENDING) {
@@ -217,10 +224,12 @@ static int hppvi_index(GICv3CPUState *cs)
             }
         }
 
+        thisnmi = lr & ICH_LR_EL2_NMI;
         thisprio = ich_lr_prio(lr);
 
-        if (thisprio < prio) {
+        if ((thisprio < prio) || ((thisprio == prio) && (thisnmi & (!nmi)))) {
             prio = thisprio;
+            nmi = thisnmi;
             idx = i;
         }
     }
@@ -289,6 +298,7 @@ static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
      * equivalent of these checks.
      */
     int grp;
+    bool is_nmi;
     uint32_t mask, prio, rprio, vpmr;
 
     if (!(cs->ich_hcr_el2 & ICH_HCR_EL2_EN)) {
@@ -301,10 +311,11 @@ static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
      */
     prio = ich_lr_prio(lr);
+    is_nmi = lr & ICH_LR_EL2_NMI;
     vpmr = extract64(cs->ich_vmcr_el2, ICH_VMCR_EL2_VPMR_SHIFT,
                      ICH_VMCR_EL2_VPMR_LENGTH);
 
-    if (prio >= vpmr) {
+    if (!is_nmi && prio >= vpmr) {
         /* Priority mask masks this interrupt */
         return false;
     }
@@ -326,6 +337,11 @@ static bool icv_hppi_can_preempt(GICv3CPUState *cs, uint64_t lr)
         return true;
     }
 
+    if ((prio & mask) == (rprio & mask) && is_nmi &&
+        !(cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI)) {
+        return true;
+    }
+
     return false;
 }
 
@@ -465,6 +481,7 @@ void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
     int idx;
     int irqlevel = 0;
     int fiqlevel = 0;
+    int nmilevel = 0;
 
     idx = hppvi_index(cs);
     trace_gicv3_cpuif_virt_update(gicv3_redist_affid(cs), idx,
@@ -482,9 +499,17 @@ void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
         uint64_t lr = cs->ich_lr_el2[idx];
 
         if (icv_hppi_can_preempt(cs, lr)) {
-            /* Virtual interrupts are simple: G0 are always FIQ, and G1 IRQ */
+            /*
+             * Virtual interrupts are simple: G0 are always FIQ, and G1 are
+             * IRQ or NMI which depends on the ICH_LR<n>_EL2.NMI to have
+             * non-maskable property.
+             */
             if (lr & ICH_LR_EL2_GROUP) {
-                irqlevel = 1;
+                if (lr & ICH_LR_EL2_NMI) {
+                    nmilevel = 1;
+                } else {
+                    irqlevel = 1;
+                }
             } else {
                 fiqlevel = 1;
             }
@@ -494,6 +519,7 @@ void gicv3_cpuif_virt_irq_fiq_update(GICv3CPUState *cs)
     trace_gicv3_cpuif_virt_set_irqs(gicv3_redist_affid(cs), fiqlevel, irqlevel);
     qemu_set_irq(cs->parent_vfiq, fiqlevel);
     qemu_set_irq(cs->parent_virq, irqlevel);
+    qemu_set_irq(cs->parent_vnmi, nmilevel);
 }
 
 static void gicv3_cpuif_virt_update(GICv3CPUState *cs)
@@ -550,7 +576,11 @@ static void icv_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     trace_gicv3_icv_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
 
-    cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+    if (cs->nmi_support) {
+        cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
+    } else {
+        cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+    }
 
     gicv3_cpuif_virt_irq_fiq_update(cs);
     return;
@@ -697,7 +727,11 @@ static void icv_ctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static uint64_t icv_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     GICv3CPUState *cs = icc_cs_from_env(env);
-    int prio = ich_highest_active_virt_prio(cs);
+    uint64_t prio = ich_highest_active_virt_prio(cs);
+
+    if (cs->ich_apr[GICV3_G1NS][0] & ICV_AP1R_EL1_NMI) {
+        prio |= ICV_RPR_EL1_NMI;
+    }
 
     trace_gicv3_icv_rpr_read(gicv3_redist_affid(cs), prio);
     return prio;
@@ -736,13 +770,19 @@ static void icv_activate_irq(GICv3CPUState *cs, int idx, int grp)
      */
     uint32_t mask = icv_gprio_mask(cs, grp);
     int prio = ich_lr_prio(cs->ich_lr_el2[idx]) & mask;
+    bool nmi = cs->ich_lr_el2[idx] & ICH_LR_EL2_NMI;
     int aprbit = prio >> (8 - cs->vprebits);
     int regno = aprbit / 32;
    int regbit = aprbit % 32;
 
     cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
     cs->ich_lr_el2[idx] |= ICH_LR_EL2_STATE_ACTIVE_BIT;
-    cs->ich_apr[grp][regno] |= (1 << regbit);
+
+    if (nmi) {
+        cs->ich_apr[grp][regno] |= ICV_AP1R_EL1_NMI;
+    } else {
+        cs->ich_apr[grp][regno] |= (1 << regbit);
+    }
 }
 
 static void icv_activate_vlpi(GICv3CPUState *cs)
@@ -763,6 +803,7 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
     int idx = hppvi_index(cs);
     uint64_t intid = INTID_SPURIOUS;
+    int el = arm_current_el(env);
 
     if (idx == HPPVI_INDEX_VLPI) {
         if (cs->hppvlpi.grp == grp && icv_hppvlpi_can_preempt(cs)) {
@@ -772,11 +813,16 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
     } else if (idx >= 0) {
         uint64_t lr = cs->ich_lr_el2[idx];
         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+        bool nmi = env->cp15.sctlr_el[el] & SCTLR_NMI && lr & ICH_LR_EL2_NMI;
 
         if (thisgrp == grp && icv_hppi_can_preempt(cs, lr)) {
             intid = ich_lr_vintid(lr);
             if (!gicv3_intid_is_special(intid)) {
-                icv_activate_irq(cs, idx, grp);
+                if (!nmi) {
+                    icv_activate_irq(cs, idx, grp);
+                } else {
+                    intid = INTID_NMI;
+                }
             } else {
                 /* Interrupt goes from Pending to Invalid */
                 cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
@@ -795,6 +841,42 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
     return intid;
 }
 
+static uint64_t icv_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    GICv3CPUState *cs = icc_cs_from_env(env);
+    int idx = hppvi_index(cs);
+    uint64_t intid = INTID_SPURIOUS;
+
+    if (idx >= 0 && idx != HPPVI_INDEX_VLPI) {
+        uint64_t lr = cs->ich_lr_el2[idx];
+        int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
+
+        if ((thisgrp == GICV3_G1NS) && icv_hppi_can_preempt(cs, lr)) {
+            intid = ich_lr_vintid(lr);
+            if (!gicv3_intid_is_special(intid)) {
+                if (lr & ICH_LR_EL2_NMI) {
+                    icv_activate_irq(cs, idx, GICV3_G1NS);
+                } else {
+                    intid = INTID_SPURIOUS;
+                }
+            } else {
+                /* Interrupt goes from Pending to Invalid */
+                cs->ich_lr_el2[idx] &= ~ICH_LR_EL2_STATE_PENDING_BIT;
+                /*
+                 * We will now return the (bogus) ID from the list register,
+                 * as per the pseudocode.
+                 */
+            }
+        }
+    }
+
+    trace_gicv3_icv_nmiar1_read(gicv3_redist_affid(cs), intid);
+
+    gicv3_cpuif_virt_update(cs);
+
+    return intid;
+}
+
 static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
 {
     /*
@@ -832,6 +914,23 @@ static int icc_highest_active_prio(GICv3CPUState *cs)
      */
     int i;
 
+    if (cs->nmi_support) {
+        /*
+         * If an NMI is active this takes precedence over anything else
+         * for priority purposes; the NMI bit is only in the AP1R0 bit.
+         * We return here the effective priority of the NMI, which is
+         * either 0x0 or 0x80. Callers will need to check NMI again for
+         * purposes of either setting the RPR register bits or for
+         * prioritization of NMI vs non-NMI.
+         */
+        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
+            return 0;
+        }
+        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
+            return (cs->gic->gicd_ctlr & GICD_CTLR_DS) ? 0 : 0x80;
+        }
+    }
+
     for (i = 0; i < icc_num_aprs(cs); i++) {
         uint32_t apr = cs->icc_apr[GICV3_G0][i] |
                        cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
@@ -898,12 +997,24 @@ static bool icc_hppi_can_preempt(GICv3CPUState *cs)
      */
     int rprio;
     uint32_t mask;
+    ARMCPU *cpu = ARM_CPU(cs->cpu);
+    CPUARMState *env = &cpu->env;
 
     if (icc_no_enabled_hppi(cs)) {
         return false;
     }
 
-    if (cs->hppi.prio >= cs->icc_pmr_el1) {
+    if (cs->hppi.nmi) {
+        if (!(cs->gic->gicd_ctlr & GICD_CTLR_DS) &&
+            cs->hppi.grp == GICV3_G1NS) {
+            if (cs->icc_pmr_el1 < 0x80) {
+                return false;
+            }
+            if (arm_is_secure(env) && cs->icc_pmr_el1 == 0x80) {
+                return false;
+            }
+        }
+    } else if (cs->hppi.prio >= cs->icc_pmr_el1) {
         /* Priority mask masks this interrupt */
         return false;
     }
@@ -923,6 +1034,12 @@ static bool icc_hppi_can_preempt(GICv3CPUState *cs)
         return true;
     }
 
+    if (cs->hppi.nmi && (cs->hppi.prio & mask) == (rprio & mask)) {
+        if (!(cs->icc_apr[cs->hppi.grp][0] & ICC_AP1R_EL1_NMI)) {
+            return true;
+        }
+    }
+
     return false;
 }
 
@@ -931,6 +1048,7 @@ void gicv3_cpuif_update(GICv3CPUState *cs)
     /* Tell the CPU about its highest priority pending interrupt */
     int irqlevel = 0;
     int fiqlevel = 0;
+    int nmilevel = 0;
     ARMCPU *cpu = ARM_CPU(cs->cpu);
     CPUARMState *env = &cpu->env;
 
@@ -969,6 +1087,8 @@ void gicv3_cpuif_update(GICv3CPUState *cs)
 
         if (isfiq) {
             fiqlevel = 1;
+        } else if (cs->hppi.nmi) {
+            nmilevel = 1;
         } else {
             irqlevel = 1;
         }
@@ -978,6 +1098,7 @@ void gicv3_cpuif_update(GICv3CPUState *cs)
 
     qemu_set_irq(cs->parent_fiq, fiqlevel);
     qemu_set_irq(cs->parent_irq, irqlevel);
+    qemu_set_irq(cs->parent_nmi, nmilevel);
 }
 
 static uint64_t icc_pmr_read(CPUARMState *env, const ARMCPRegInfo *ri)
@@ -1044,8 +1165,13 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq)
     int aprbit = prio >> (8 - cs->prebits);
     int regno = aprbit / 32;
     int regbit = aprbit % 32;
+    bool nmi = cs->hppi.nmi;
 
-    cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
+    if (nmi) {
+        cs->icc_apr[cs->hppi.grp][regno] |= ICC_AP1R_EL1_NMI;
+    } else {
+        cs->icc_apr[cs->hppi.grp][regno] |= (1 << regbit);
+    }
 
     if (irq < GIC_INTERNAL) {
         cs->gicr_iactiver0 = deposit32(cs->gicr_iactiver0, irq, 1, 1);
@@ -1159,6 +1285,7 @@ static uint64_t icc_iar0_read(CPUARMState *env, const ARMCPRegInfo *ri)
 static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     GICv3CPUState *cs = icc_cs_from_env(env);
+    int el = arm_current_el(env);
     uint64_t intid;
 
     if (icv_access(env, HCR_IMO)) {
@@ -1172,13 +1299,44 @@ static uint64_t icc_iar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
     }
 
     if (!gicv3_intid_is_special(intid)) {
-        icc_activate_irq(cs, intid);
+        if (cs->hppi.nmi && env->cp15.sctlr_el[el] & SCTLR_NMI) {
+            intid = INTID_NMI;
+        } else {
+            icc_activate_irq(cs, intid);
+        }
     }
 
     trace_gicv3_icc_iar1_read(gicv3_redist_affid(cs), intid);
     return intid;
 }
 
+static uint64_t icc_nmiar1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    GICv3CPUState *cs = icc_cs_from_env(env);
+    uint64_t intid;
+
+    if (icv_access(env, HCR_IMO)) {
+        return icv_nmiar1_read(env, ri);
+    }
+
+    if (!icc_hppi_can_preempt(cs)) {
+        intid = INTID_SPURIOUS;
+    } else {
+        intid = icc_hppir1_value(cs, env);
+    }
+
+    if (!gicv3_intid_is_special(intid)) {
+        if (!cs->hppi.nmi) {
+            intid = INTID_SPURIOUS;
+        } else {
+            icc_activate_irq(cs, intid);
+        }
+    }
+
+    trace_gicv3_icc_nmiar1_read(gicv3_redist_affid(cs), intid);
+    return intid;
+}
+
 static void icc_drop_prio(GICv3CPUState *cs, int grp)
 {
     /* Drop the priority of the currently active interrupt in
@@ -1205,6 +1363,12 @@ static void icc_drop_prio(GICv3CPUState *cs, int grp)
         if (!*papr) {
             continue;
         }
+
+        if (i == 0 && cs->nmi_support && (*papr & ICC_AP1R_EL1_NMI)) {
+            *papr &= (~ICC_AP1R_EL1_NMI);
+            break;
+        }
+
         /* Clear the lowest set bit */
         *papr &= *papr - 1;
         break;
@@ -1239,6 +1403,15 @@ static int icc_highest_active_group(GICv3CPUState *cs)
      */
     int i;
 
+    if (cs->nmi_support) {
+        if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
+            return GICV3_G1;
+        }
+        if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
+            return GICV3_G1NS;
+        }
+    }
+
     for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
         int g0ctz = ctz32(cs->icc_apr[GICV3_G0][i]);
         int g1ctz = ctz32(cs->icc_apr[GICV3_G1][i]);
@@ -1329,7 +1502,7 @@ static void icv_increment_eoicount(GICv3CPUState *cs)
                                ICH_HCR_EL2_EOICOUNT_LENGTH, eoicount + 1);
 }
 
-static int icv_drop_prio(GICv3CPUState *cs)
+static int icv_drop_prio(GICv3CPUState *cs, bool *nmi)
 {
     /* Drop the priority of the currently active virtual interrupt
      * (favouring group 0 if there is a set active bit at
@@ -1351,6 +1524,12 @@ static int icv_drop_prio(GICv3CPUState *cs)
             continue;
         }
 
+        if (i == 0 && cs->nmi_support && (*papr1 & ICV_AP1R_EL1_NMI)) {
+            *papr1 &= (~ICV_AP1R_EL1_NMI);
+            *nmi = true;
+            return 0xff;
+        }
+
         /* We can't just use the bit-twiddling hack icc_drop_prio() does
          * because we need to return the bit number we cleared so
          * it can be compared against the list register's priority field.
@@ -1410,6 +1589,7 @@ static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
     int irq = value & 0xffffff;
     int grp = ri->crm == 8 ? GICV3_G0 : GICV3_G1NS;
     int idx, dropprio;
+    bool nmi = false;
 
     trace_gicv3_icv_eoir_write(ri->crm == 8 ? 0 : 1,
                                gicv3_redist_affid(cs), value);
@@ -1422,8 +1602,8 @@ static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
      * error checks" (because that lets us avoid scanning the AP
      * registers twice).
      */
-    dropprio = icv_drop_prio(cs);
-    if (dropprio == 0xff) {
+    dropprio = icv_drop_prio(cs, &nmi);
+    if (dropprio == 0xff && !nmi) {
         /* No active interrupt. It is CONSTRAINED UNPREDICTABLE
          * whether the list registers are checked in this
          * situation; we choose not to.
@@ -1445,8 +1625,9 @@ static void icv_eoir_write(CPUARMState *env, const ARMCPRegInfo *ri,
         uint64_t lr = cs->ich_lr_el2[idx];
         int thisgrp = (lr & ICH_LR_EL2_GROUP) ? GICV3_G1NS : GICV3_G0;
         int lr_gprio = ich_lr_prio(lr) & icv_gprio_mask(cs, grp);
+        bool thisnmi = lr & ICH_LR_EL2_NMI;
 
-        if (thisgrp == grp && lr_gprio == dropprio) {
+        if (thisgrp == grp && (lr_gprio == dropprio || (thisnmi & nmi))) {
             if (!icv_eoi_split(env, cs) || irq >= GICV3_LPI_INTID_START) {
                 /*
                  * Priority drop and deactivate not split: deactivate irq now.
@@ -1693,7 +1874,11 @@ static void icc_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
         return;
     }
 
-    cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
+    if (cs->nmi_support) {
+        cs->icc_apr[grp][regno] = value & (0xFFFFFFFFU | ICC_AP1R_EL1_NMI);
+    } else {
+        cs->icc_apr[grp][regno] = value & 0xFFFFFFFFU;
+    }
     gicv3_cpuif_update(cs);
 }
 
@@ -1783,7 +1968,7 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     GICv3CPUState *cs = icc_cs_from_env(env);
-    int prio;
+    uint64_t prio;
 
     if (icv_access(env, HCR_FMO | HCR_IMO)) {
         return icv_rpr_read(env, ri);
@@ -1803,6 +1988,22 @@ static uint64_t icc_rpr_read(CPUARMState *env, const ARMCPRegInfo *ri)
         }
     }
 
+    if (cs->nmi_support) {
+        /* NMI info is reported in the high bits of RPR */
+        if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env)) {
+            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
+                prio |= ICC_RPR_EL1_NMI;
+            }
+        } else {
+            if (cs->icc_apr[GICV3_G1NS][0] & ICC_AP1R_EL1_NMI) {
+                prio |= ICC_RPR_EL1_NSNMI;
+            }
+            if (cs->icc_apr[GICV3_G1][0] & ICC_AP1R_EL1_NMI) {
+                prio |= ICC_RPR_EL1_NMI;
+            }
+        }
+    }
+
     trace_gicv3_icc_rpr_read(gicv3_redist_affid(cs), prio);
     return prio;
 }
@@ -2482,6 +2683,15 @@ static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
     },
 };
 
+static const ARMCPRegInfo gicv3_cpuif_gicv3_nmi_reginfo[] = {
+    { .name = "ICC_NMIAR1_EL1", .state = ARM_CP_STATE_BOTH,
+      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 5,
+      .type = ARM_CP_IO | ARM_CP_NO_RAW,
+      .access = PL1_R, .accessfn = gicv3_irq_access,
+      .readfn = icc_nmiar1_read,
+    },
+};
+
 static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     GICv3CPUState *cs = icc_cs_from_env(env);
@@ -2503,7 +2713,11 @@ static void ich_ap_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     trace_gicv3_ich_ap_write(ri->crm & 1, regno, gicv3_redist_affid(cs), value);
 
-    cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+    if (cs->nmi_support) {
+        cs->ich_apr[grp][regno] = value & (0xFFFFFFFFU | ICV_AP1R_EL1_NMI);
+    } else {
+        cs->ich_apr[grp][regno] = value & 0xFFFFFFFFU;
+    }
     gicv3_cpuif_virt_irq_fiq_update(cs);
 }
 
@@ -2620,6 +2834,11 @@ static void ich_lr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                          8 - cs->vpribits, 0);
     }
 
+    /* Enforce RES0 bit in NMI field when FEAT_GICv3_NMI is not implemented */
+    if (!cs->nmi_support) {
+        value &= ~ICH_LR_EL2_NMI;
+    }
+
     cs->ich_lr_el2[regno] = value;
     gicv3_cpuif_virt_update(cs);
 }
@@ -2839,6 +3058,19 @@ void gicv3_init_cpuif(GICv3State *s)
         define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
 
         /*
+         * If the CPU implements FEAT_NMI and FEAT_GICv3 it must also
+         * implement FEAT_GICv3_NMI, which is the CPU interface part
+         * of NMI support. This is distinct from whether the GIC proper
+         * (redistributors and distributor) have NMI support. In QEMU
+         * that is a property of the GIC device in s->nmi_support;
+         * cs->nmi_support indicates the CPU interface's support.
+         */
+        if (cpu_isar_feature(aa64_nmi, cpu)) {
+            cs->nmi_support = true;
+            define_arm_cp_regs(cpu, gicv3_cpuif_gicv3_nmi_reginfo);
+        }
+
+        /*
         * The CPU implementation specifies the number of supported
         * bits of physical priority. For backwards compatibility
         * of migration, we have a compat property that forces use
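On the physical CPU interface the flow is now: if the highest priority pending interrupt has superpriority and SCTLR_ELx.NMI is set, ICC_IAR1_EL1 returns 1022 (INTID_NMI) without activating anything, and the interrupt is actually acknowledged through the new ICC_NMIAR1_EL1 register (which conversely returns 1023 if the best candidate is not an NMI). The sketch below is guest-side code, not part of QEMU: the register encodings are taken from gicv3_cpuif_gicv3_nmi_reginfo[] above and the architectural ICC_IAR1_EL1 encoding, while the handler structure itself is only illustrative.

#include <stdint.h>

#define INTID_NMI      1022
#define INTID_SPECIAL  1020   /* 1020..1023 are special INTIDs */

static inline uint64_t read_icc_iar1(void)
{
    uint64_t x;
    __asm__ volatile("mrs %0, S3_0_C12_C12_0" : "=r"(x)); /* ICC_IAR1_EL1 */
    return x;
}

static inline uint64_t read_icc_nmiar1(void)
{
    uint64_t x;
    __asm__ volatile("mrs %0, S3_0_C12_C9_5" : "=r"(x));  /* ICC_NMIAR1_EL1 */
    return x;
}

void irq_handler(void)
{
    uint64_t intid = read_icc_iar1();

    if (intid == INTID_NMI) {
        /* A superpriority interrupt is pending: acknowledge it separately */
        intid = read_icc_nmiar1();
    }
    if (intid >= INTID_SPECIAL) {
        return;                /* spurious or otherwise special */
    }
    /* ... handle intid, then write it back to ICC_EOIR1_EL1 ... */
}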
diff --git a/hw/intc/arm_gicv3_dist.c b/hw/intc/arm_gicv3_dist.c
index 35e850685c..d8207acb22 100644
--- a/hw/intc/arm_gicv3_dist.c
+++ b/hw/intc/arm_gicv3_dist.c
@@ -89,6 +89,29 @@ static int gicd_ns_access(GICv3State *s, int irq)
     return extract32(s->gicd_nsacr[irq / 16], (irq % 16) * 2, 2);
 }
 
+static void gicd_write_bitmap_reg(GICv3State *s, MemTxAttrs attrs,
+                                  uint32_t *bmp, maskfn *maskfn,
+                                  int offset, uint32_t val)
+{
+    /*
+     * Helper routine to implement writing to a "set" register
+     * (GICD_INMIR, etc).
+     * Semantics implemented here:
+     * RAZ/WI for SGIs, PPIs, unimplemented IRQs
+     * Bits corresponding to Group 0 or Secure Group 1 interrupts RAZ/WI.
+     * offset should be the offset in bytes of the register from the start
+     * of its group.
+     */
+    int irq = offset * 8;
+
+    if (irq < GIC_INTERNAL || irq >= s->num_irq) {
+        return;
+    }
+    val &= mask_group_and_nsacr(s, attrs, maskfn, irq);
+    *gic_bmp_ptr32(bmp, irq) = val;
+    gicv3_update(s, irq, 32);
+}
+
 static void gicd_write_set_bitmap_reg(GICv3State *s, MemTxAttrs attrs,
                                       uint32_t *bmp,
                                       maskfn *maskfn,
@@ -389,6 +412,7 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
          *  by GICD_TYPER.IDbits)
          * MBIS == 0 (message-based SPIs not supported)
          * SecurityExtn == 1 if security extns supported
+         * NMI = 1 if Non-maskable interrupt property is supported
          * CPUNumber == 0 since for us ARE is always 1
          * ITLinesNumber == (((max SPI IntID + 1) / 32) - 1)
          */
@@ -402,6 +426,7 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
         bool dvis = s->revision >= 4;
 
         *data = (1 << 25) | (1 << 24) | (dvis << 18) | (sec_extn << 10) |
+            (s->nmi_support << GICD_TYPER_NMI_SHIFT) |
             (s->lpi_enable << GICD_TYPER_LPIS_SHIFT) |
             (0xf << 19) | itlinesnumber;
         return true;
@@ -543,6 +568,11 @@ static bool gicd_readl(GICv3State *s, hwaddr offset,
         /* RAZ/WI since affinity routing is always enabled */
         *data = 0;
         return true;
+    case GICD_INMIR ... GICD_INMIR + 0x7f:
+        *data = (!s->nmi_support) ? 0 :
+                gicd_read_bitmap_reg(s, attrs, s->nmi, NULL,
+                                     offset - GICD_INMIR);
+        return true;
     case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
     {
         uint64_t r;
@@ -752,6 +782,12 @@ static bool gicd_writel(GICv3State *s, hwaddr offset,
     case GICD_SPENDSGIR ... GICD_SPENDSGIR + 0xf:
         /* RAZ/WI since affinity routing is always enabled */
         return true;
+    case GICD_INMIR ... GICD_INMIR + 0x7f:
+        if (s->nmi_support) {
+            gicd_write_bitmap_reg(s, attrs, s->nmi, NULL,
+                                  offset - GICD_INMIR, value);
+        }
+        return true;
     case GICD_IROUTER ... GICD_IROUTER + 0x1fdf:
     {
         uint64_t r;
diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c
index 52e9aca9c6..bf31158470 100644
--- a/hw/intc/arm_gicv3_its.c
+++ b/hw/intc/arm_gicv3_its.c
@@ -1950,13 +1950,13 @@ static void gicv3_arm_its_realize(DeviceState *dev, Error **errp)
     }
 }
 
-static void gicv3_its_reset_hold(Object *obj)
+static void gicv3_its_reset_hold(Object *obj, ResetType type)
 {
     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj);
     GICv3ITSClass *c = ARM_GICV3_ITS_GET_CLASS(s);
 
     if (c->parent_phases.hold) {
-        c->parent_phases.hold(obj);
+        c->parent_phases.hold(obj, type);
     }
 
     /* Quiescent bit reset to 1 */
diff --git a/hw/intc/arm_gicv3_its_common.c b/hw/intc/arm_gicv3_its_common.c
index 331d6b93cc..0b97362cd2 100644
--- a/hw/intc/arm_gicv3_its_common.c
+++ b/hw/intc/arm_gicv3_its_common.c
@@ -123,7 +123,7 @@ void gicv3_its_init_mmio(GICv3ITSState *s, const MemoryRegionOps *ops,
     msi_nonbroken = true;
 }
 
-static void gicv3_its_common_reset_hold(Object *obj)
+static void gicv3_its_common_reset_hold(Object *obj, ResetType type)
 {
     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj);
 
diff --git a/hw/intc/arm_gicv3_its_kvm.c b/hw/intc/arm_gicv3_its_kvm.c
index 3befc960db..35539c099f 100644
--- a/hw/intc/arm_gicv3_its_kvm.c
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -197,14 +197,14 @@ static void kvm_arm_its_post_load(GICv3ITSState *s)
                       GITS_CTLR, &s->ctlr, true, &error_abort);
 }
 
-static void kvm_arm_its_reset_hold(Object *obj)
+static void kvm_arm_its_reset_hold(Object *obj, ResetType type)
 {
     GICv3ITSState *s = ARM_GICV3_ITS_COMMON(obj);
     KVMARMITSClass *c = KVM_ARM_ITS_GET_CLASS(s);
     int i;
 
     if (c->parent_phases.hold) {
-        c->parent_phases.hold(obj);
+        c->parent_phases.hold(obj, type);
     }
 
     if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index 77eb37e131..9ea6b8e218 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -703,7 +703,7 @@ static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
     c->icc_ctlr_el1[GICV3_S] = c->icc_ctlr_el1[GICV3_NS];
 }
 
-static void kvm_arm_gicv3_reset_hold(Object *obj)
+static void kvm_arm_gicv3_reset_hold(Object *obj, ResetType type)
 {
     GICv3State *s = ARM_GICV3_COMMON(obj);
     KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);
@@ -711,7 +711,7 @@ static void kvm_arm_gicv3_reset_hold(Object *obj)
     DPRINTF("Reset\n");
 
     if (kgc->parent_phases.hold) {
-        kgc->parent_phases.hold(obj);
+        kgc->parent_phases.hold(obj, type);
     }
 
     if (s->migration_blocker) {
@@ -805,6 +805,11 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
         return;
     }
 
+    if (s->nmi_support) {
+        error_setg(errp, "NMI is not supported with the in-kernel GIC");
+        return;
+    }
+
    gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL);
 
     for (i = 0; i < s->num_cpu; i++) {
diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c
index 8153525849..90b238fac0 100644
--- a/hw/intc/arm_gicv3_redist.c
+++ b/hw/intc/arm_gicv3_redist.c
@@ -35,6 +35,15 @@ static int gicr_ns_access(GICv3CPUState *cs, int irq)
     return extract32(cs->gicr_nsacr, irq * 2, 2);
 }
 
+static void gicr_write_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
+                                  uint32_t *reg, uint32_t val)
+{
+    /* Helper routine to implement writing to a "set" register */
+    val &= mask_group(cs, attrs);
+    *reg = val;
+    gicv3_redist_update(cs);
+}
+
 static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                       uint32_t *reg, uint32_t val)
 {
@@ -111,6 +120,7 @@ static void update_for_one_lpi(GICv3CPUState *cs, int irq,
         ((prio == hpp->prio) && (irq <= hpp->irq))) {
         hpp->irq = irq;
         hpp->prio = prio;
+        hpp->nmi = false;
         /* LPIs and vLPIs are always non-secure Grp1 interrupts */
         hpp->grp = GICV3_G1NS;
     }
@@ -147,6 +157,7 @@ static void update_for_all_lpis(GICv3CPUState *cs, uint64_t ptbase,
     int i, bit;
 
     hpp->prio = 0xff;
+    hpp->nmi = false;
 
     for (i = GICV3_LPI_INTID_START / 8; i < pendt_size / 8; i++) {
         address_space_read(as, ptbase + i, MEMTXATTRS_UNSPECIFIED, &pend, 1);
@@ -232,6 +243,7 @@ static void gicv3_redist_update_vlpi_only(GICv3CPUState *cs)
 
     if (!FIELD_EX64(cs->gicr_vpendbaser, GICR_VPENDBASER, VALID)) {
         cs->hppvlpi.prio = 0xff;
+        cs->hppvlpi.nmi = false;
         return;
     }
@@ -406,6 +418,10 @@ static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
         *data = value;
         return MEMTX_OK;
     }
+    case GICR_INMIR0:
+        *data = cs->gic->nmi_support ?
+                gicr_read_bitmap_reg(cs, attrs, cs->gicr_inmir0) : 0;
+        return MEMTX_OK;
     case GICR_ICFGR0:
     case GICR_ICFGR1:
     {
@@ -555,6 +571,12 @@ static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
         gicv3_redist_update(cs);
         return MEMTX_OK;
     }
+    case GICR_INMIR0:
+        if (cs->gic->nmi_support) {
+            gicr_write_bitmap_reg(cs, attrs, &cs->gicr_inmir0, value);
+        }
+        return MEMTX_OK;
+
     case GICR_ICFGR0:
         /* Register is all RAZ/WI or RAO/WI bits */
         return MEMTX_OK;
diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h
index 29d5cdc1b6..bc9f518fe8 100644
--- a/hw/intc/gicv3_internal.h
+++ b/hw/intc/gicv3_internal.h
@@ -52,6 +52,8 @@
 #define GICD_SGIR            0x0F00
 #define GICD_CPENDSGIR       0x0F10
 #define GICD_SPENDSGIR       0x0F20
+#define GICD_INMIR           0x0F80
+#define GICD_INMIRnE         0x3B00
 #define GICD_IROUTER         0x6000
 #define GICD_IDREGS          0xFFD0
@@ -68,6 +70,7 @@
 #define GICD_CTLR_E1NWF             (1U << 7)
 #define GICD_CTLR_RWP               (1U << 31)
 
+#define GICD_TYPER_NMI_SHIFT           9
 #define GICD_TYPER_LPIS_SHIFT          17
 
 /* 16 bits EventId */
@@ -109,6 +112,7 @@
 #define GICR_ICFGR1           (GICR_SGI_OFFSET + 0x0C04)
 #define GICR_IGRPMODR0        (GICR_SGI_OFFSET + 0x0D00)
 #define GICR_NSACR            (GICR_SGI_OFFSET + 0x0E00)
+#define GICR_INMIR0           (GICR_SGI_OFFSET + 0x0F80)
 
 /* VLPI redistributor registers, offsets from VLPI_base */
 #define GICR_VPROPBASER  (GICR_VLPI_OFFSET + 0x70)
@@ -190,6 +194,10 @@ FIELD(GICR_VPENDBASER, VALID, 63, 1)
 #define ICC_CTLR_EL3_A3V (1U << 15)
 #define ICC_CTLR_EL3_NDS (1U << 17)
 
+#define ICC_AP1R_EL1_NMI (1ULL << 63)
+#define ICC_RPR_EL1_NSNMI (1ULL << 62)
+#define ICC_RPR_EL1_NMI (1ULL << 63)
+
 #define ICH_VMCR_EL2_VENG0_SHIFT 0
 #define ICH_VMCR_EL2_VENG0 (1U << ICH_VMCR_EL2_VENG0_SHIFT)
 #define ICH_VMCR_EL2_VENG1_SHIFT 1
@@ -238,6 +246,7 @@ FIELD(GICR_VPENDBASER, VALID, 63, 1)
 #define ICH_LR_EL2_PRIORITY_SHIFT 48
 #define ICH_LR_EL2_PRIORITY_LENGTH 8
 #define ICH_LR_EL2_PRIORITY_MASK (0xffULL << ICH_LR_EL2_PRIORITY_SHIFT)
+#define ICH_LR_EL2_NMI (1ULL << 59)
 #define ICH_LR_EL2_GROUP (1ULL << 60)
 #define ICH_LR_EL2_HW (1ULL << 61)
 #define ICH_LR_EL2_STATE_SHIFT 62
@@ -269,6 +278,9 @@ FIELD(GICR_VPENDBASER, VALID, 63, 1)
 #define ICH_VTR_EL2_PREBITS_SHIFT 26
 #define ICH_VTR_EL2_PRIBITS_SHIFT 29
 
+#define ICV_AP1R_EL1_NMI (1ULL << 63)
+#define ICV_RPR_EL1_NMI (1ULL << 63)
+
 /* ITS Registers */
 
 FIELD(GITS_BASER, SIZE, 0, 8)
@@ -507,6 +519,7 @@ FIELD(VTE, RDBASE, 42, RDBASE_PROCNUM_LENGTH)
 /* Special interrupt IDs */
 #define INTID_SECURE 1020
 #define INTID_NONSECURE 1021
+#define INTID_NMI 1022
 #define INTID_SPURIOUS 1023
 
 /* Functions internal to the emulated GICv3 */
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 1ef29d0256..47340b5bc1 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -116,6 +116,7 @@ gicv3_cpuif_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel) "GICv3 CPU i/f
 gicv3_icc_generate_sgi(uint32_t cpuid, int irq, int irm, uint32_t aff, uint32_t targetlist) "GICv3 CPU i/f 0x%x generating SGI %d IRM %d target affinity 0x%xxx targetlist 0x%x"
 gicv3_icc_iar0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR0 read cpu 0x%x value 0x%" PRIx64
 gicv3_icc_iar1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_IAR1 read cpu 0x%x value 0x%" PRIx64
+gicv3_icc_nmiar1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_NMIAR1 read cpu 0x%x value 0x%" PRIx64
 gicv3_icc_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICC_EOIR%d write cpu 0x%x value 0x%" PRIx64
 gicv3_icc_hppir0_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR0 read cpu 0x%x value 0x%" PRIx64
 gicv3_icc_hppir1_read(uint32_t cpu, uint64_t val) "GICv3 ICC_HPPIR1 read cpu 0x%x value 0x%" PRIx64
@@ -151,6 +152,7 @@ gicv3_icv_rpr_read(uint32_t cpu, uint64_t val) "GICv3 ICV_RPR read cpu 0x%x valu
 gicv3_icv_hppir_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_HPPIR%d read cpu 0x%x value 0x%" PRIx64
 gicv3_icv_dir_write(uint32_t cpu, uint64_t val) "GICv3 ICV_DIR write cpu 0x%x value 0x%" PRIx64
 gicv3_icv_iar_read(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_IAR%d read cpu 0x%x value 0x%" PRIx64
+gicv3_icv_nmiar1_read(uint32_t cpu, uint64_t val) "GICv3 ICV_NMIAR1 read cpu 0x%x value 0x%" PRIx64
 gicv3_icv_eoir_write(int grp, uint32_t cpu, uint64_t val) "GICv3 ICV_EOIR%d write cpu 0x%x value 0x%" PRIx64
 gicv3_cpuif_virt_update(uint32_t cpuid, int idx, int hppvlpi, int grp, int prio) "GICv3 CPU i/f 0x%x virt HPPI update LR index %d HPPVLPI %d grp %d prio %d"
 gicv3_cpuif_virt_set_irqs(uint32_t cpuid, int fiqlevel, int irqlevel) "GICv3 CPU i/f 0x%x virt HPPI update: setting FIQ %d IRQ %d"
diff --git a/hw/intc/xics.c b/hw/intc/xics.c
index 700abfa7a6..9b3b7abaea 100644
--- a/hw/intc/xics.c
+++ b/hw/intc/xics.c
@@ -579,7 +579,7 @@ static void ics_reset_irq(ICSIRQState *irq)
     irq->saved_priority = 0xff;
 }
 
-static void ics_reset_hold(Object *obj)
+static void ics_reset_hold(Object *obj, ResetType type)
 {
     ICSState *ics = ICS(obj);
     g_autofree uint8_t *flags = g_malloc(ics->nr_irqs);
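From the guest's point of view the new state is programmed through GICD_INMIR (distributor offset 0x0F80, one bit per SPI, RAZ/WI for INTIDs below 32) and GICR_INMIR0 (SGI_base + 0x0F80 for SGIs/PPIs). The emulated write replaces the whole 32-bit word after group masking, so a driver would read-modify-write. The bare-metal sketch below is not part of the commit; GICD_BASE and GICR_SGI_BASE are hypothetical board-specific addresses, and only bits the accessing security state may see (e.g. Non-secure Group 1 when DS == 0) take effect.

#include <stdint.h>

#define GICD_BASE      0x08000000UL   /* assumption: board-specific */
#define GICR_SGI_BASE  0x080B0000UL   /* assumption: board-specific */
#define GICD_INMIR     0x0F80
#define GICR_INMIR0    0x0F80

static inline uint32_t mmio_read32(uintptr_t addr)
{
    return *(volatile uint32_t *)addr;
}

static inline void mmio_write32(uintptr_t addr, uint32_t val)
{
    *(volatile uint32_t *)addr = val;
}

/* Give SPI `intid` (>= 32) the non-maskable (superpriority) property */
static void gicd_set_nmi(uint32_t intid)
{
    uintptr_t reg = GICD_BASE + GICD_INMIR + (intid / 32) * 4;

    mmio_write32(reg, mmio_read32(reg) | (1u << (intid % 32)));
}

/* Same for a PPI (16..31) on this CPU's redistributor SGI frame */
static void gicr_set_nmi(uint32_t intid)
{
    uintptr_t reg = GICR_SGI_BASE + GICR_INMIR0;

    mmio_write32(reg, mmio_read32(reg) | (1u << intid));
}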